author     Larry Bassel <lbassel@codeaurora.org>     2013-10-11 15:36:02 -0700
committer  Simon Shields <keepcalm444@gmail.com>     2016-06-12 21:19:37 +1000
commit     2d2bf2aa3ee28795f6a3bc69b1de58e777e22f07 (patch)
tree       212bb003e5b4136274c8138eb3c43b9d2ec0aed0 /drivers/staging
parent     125790986c7fef051c65aff2ef71c5ea727073ab (diff)
download   kernel_samsung_smdk4412-2d2bf2aa3ee28795f6a3bc69b1de58e777e22f07.tar.gz
           kernel_samsung_smdk4412-2d2bf2aa3ee28795f6a3bc69b1de58e777e22f07.tar.bz2
           kernel_samsung_smdk4412-2d2bf2aa3ee28795f6a3bc69b1de58e777e22f07.zip
zram: use 3.10 version of zram
commit 25eeb667599b192ea850a062d69383ee864c06ab
Author: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Date: Wed Mar 13 15:06:16 2013 +0800

zram: fix zram_bvec_read duplicate dump failure message and stat accumulation

When zram decompression fails, the code unnecessarily dumps failure messages and does stat accumulation in zram_bvec_read(); this work is already done in zram_decompress_page(), so the patch skips the redundant work.

Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 78110bb8dc4a7ff331bfa3cfe7d4e287cfb3f22b
Author: Joe Perches <joe@perches.com>
Date: Mon Feb 11 09:41:29 2013 -0800

staging: Remove unnecessary OOM messages

alloc failures already get standardized OOM messages and a dump_stack. For the affected mallocs around these OOM messages:
- Converted kzallocs with multiplies to kcalloc.
- Converted kmallocs with multiplies to kmalloc_array.
- Converted a kmalloc/strlen/strncpy to kstrdup.
- Moved a spin_lock below a removed OOM message and removed a now unnecessary spin_unlock.
- Neatened alignment and whitespace.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 1e927711c5ecabe76010ed9249f8f3747829f04f
Author: Fengguang Wu <fengguang.wu@intel.com>
Date: Fri Feb 8 10:15:10 2013 +0800

staging: zram: __zram_reset_device() can be static

Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 8b3cc3edb745cbc131560d19e3f32d81c07454b1
Author: Minchan Kim <minchan@kernel.org>
Date: Wed Feb 6 08:48:53 2013 +0900

zram: get rid of lockdep warning

Lockdep complains about a recursive deadlock on zram->init_lock. [1] made it a false positive because we can't request I/O to zram before setting the disksize. Anyway, we should shut lockdep up to avoid many reports from users.

[1]: zram: force disksize setting before using zram

Acked-by: Jerome Marchand <jmarchan@redhat.com>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 152bce6bdd6e6866ff83166ac75177d001c4360d
Author: Minchan Kim <minchan@kernel.org>
Date: Wed Feb 6 08:45:22 2013 +0900

zram: fix warning of print format

The kbuild bot whinges due to a print format mismatch caused by "zram: force disksize setting before using zram". This patch fixes it.

Reported-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 3de738cd30306f754ea35d35b5dad29fdbec84c9
Author: Minchan Kim <minchan@kernel.org>
Date: Wed Jan 30 11:41:41 2013 +0900

zram: give up lazy initialization of zram metadata

1) Users of zram normally do mkfs.xxx or mkswap before using the zram block device (normally at boot time). That ends up allocating zram's metadata before real usage, so the benefit of lazy initialization is mitigated.

2) Some users want to use zram when memory pressure is high (i.e., load zram dynamically, NOT at boot time). That makes sense because people don't want to waste memory until memory pressure is high (i.e., when zram is really helpful). In this case, lazy initialization could easily fail because we will use GFP_NOIO instead of GFP_KERNEL to avoid deadlock. So the benefit of lazy initialization is mitigated here, too.

3) Metadata overhead is not critical and Nitin has a plan to diet it.
4K : 12 bytes (64-bit machine) -> 64G : 192M, so 0.3% isn't a big overhead. If an insane user uses up to 20 such big zram devices, it could consume 6% of RAM, but the efficiency of zram will cover the waste.

So this patch gives up lazy initialization and instead initializes the metadata at disksize setting time.

Acked-by: Jerome Marchand <jmarchan@redhat.com>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 0231c403bb065307493fe997ad170487b4d55eb8
Author: Minchan Kim <minchan@kernel.org>
Date: Wed Jan 30 11:41:40 2013 +0900

zram: force disksize setting before using zram

The zram documentation currently says "setting disksize is optional", but that is partly wrong. When you try to use zram for the first time after booting, you must set the disksize, otherwise zram can't work because the zram gendisk's size is 0. But once you have done that, you can use zram freely after a reset because, paradoxically, reset doesn't reset the size to zero. So only at that point is the disksize setting optional. :( It's inconsistent user behavior and not straightforward. This patch forces the disksize to always be set before zram is used.

Yes, it changes the current behavior, so someone could complain when upgrading zram. That could be a problem if zram were mainline, but it still lives in staging, so the behavior can be changed for the right way to go. Let them excuse.

Acked-by: Jerome Marchand <jmarchan@redhat.com>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 7e5a5104c6af709a8d97d5f4711e7c917761d464
Author: Minchan Kim <minchan@kernel.org>
Date: Wed Jan 30 11:41:39 2013 +0900

zram: Fix deadlock bug in partial read/write

Currently zram allocates a new page with GFP_KERNEL in the zram I/O path if the I/O is partial. Unfortunately, this may cause a deadlock with the reclaim path, like below:

write_page from fs
  fs_lock
  allocation(GFP_KERNEL)
    reclaim
      pageout
        write_page from fs
          fs_lock  <-- deadlock

This patch fixes it by using GFP_NOIO. In the read path, we reorganize the code flow so that kmap_atomic is called after the GFP_NOIO allocation.

Cc: stable@vger.kernel.org
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Acked-by: Nitin Gupta <ngupta@vflare.org>
[ penberg@kernel.org: don't use GFP_ATOMIC ]
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 0d145a501778042d0411c843ed5b468b41f8a171
Author: Seth Jennings <sjenning@linux.vnet.ibm.com>
Date: Wed Jan 30 09:36:52 2013 -0600

staging: zsmalloc: remove unused pool name

zs_create_pool() currently takes a name argument which is never used in any useful way. This patch removes it.

Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 8f5f90a872c38b4e78f3cc95e8a25434b98e4db2
Merge: c0cd2da 949db15
Author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Fri Jan 25 21:25:02 2013 -0800

Merge 3.8-rc5 into staging-next

This resolves a merge issue with an iio driver and the zram code.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit d178a07c4bd380492b6aca9e5d3985c19ca88fdb
Author: Davidlohr Bueso <davidlohr.bueso@hp.com>
Date: Tue Jan 1 21:24:29 2013 -0800

staging: zram: drop zram_stat_dec/inc functions

It seems like overkill to have functions for adding and subtracting 1 from the 32-bit counters. Just do it directly.

Signed-off-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit cad683fb9d53fa2cbcf82660e663fa1180f86797
Author: Davidlohr Bueso <davidlohr.bueso@hp.com>
Date: Tue Jan 1 21:24:22 2013 -0800

staging: zram: show correct disksize

The ->disksize variable stores values in units of bytes; print the correct size in kB.

Signed-off-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit ca3d70bd68455133eabcb8a0ae1b40254d87188b
Author: Davidlohr Bueso <davidlohr.bueso@hp.com>
Date: Tue Jan 1 21:24:13 2013 -0800

staging: zram: simplify num_devices parameter

Simplify dealing with num_devices when initializing zram. Also clean up some of the output messages.

Signed-off-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 397c60668aa5ae7130b5ad4e73870d7b8a787085
Author: Nitin Gupta <ngupta@vflare.org>
Date: Wed Jan 2 08:53:41 2013 -0800

staging: zram: fix invalid memory references during disk write

Fixes a bug introduced by commit c8f2f0db1 ("zram: Fix handling of incompressible pages") which caused invalid memory references during disk write. Invalid references could occur in two cases:
- Incoming data expands on compression: in this case, a reference was made to a kunmap()'ed bio page.
- Partial (non PAGE_SIZE) write with incompressible data: in this case, a reference was made to a kfree()'ed buffer.

Fixes bug 50081: https://bugzilla.kernel.org/show_bug.cgi?id=50081

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Cc: stable <stable@vger.kernel.org>
Reported-by: Mihail Kasadjikov <hamer.mk@gmail.com>
Reported-by: Tomas M <tomas@slax.org>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit dbc320f7490933eb6a073d82aceac75d5e7ce82b
Author: Masanari Iida <standby24x7@gmail.com>
Date: Mon Jan 7 23:28:10 2013 +0900

staging: Add angle bracket before and after the URL

Add the missing angle brackets before and after the URL.

Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 26907840e63b3187266db1865e76ea9e98b01b19
Author: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Date: Tue Oct 30 22:42:31 2012 +0300

staging: zram: handle mem suffixes in disk size zram_sysfs parameter

Use memparse() to allow mem suffixes in the disksize sysfs number.

Examples:
echo 256K > /sys/block/zram0/disksize
echo 512M > /sys/block/zram0/disksize
echo 1G > /sys/block/zram0/disksize

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 37b51fdddf64e7ba0971d070428655f8d6f36578
Author: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Date: Tue Oct 30 22:40:23 2012 +0300

staging: zram: factor-out zram_decompress_page() function

zram_bvec_read() shared decompression functionality with the zram_read_before_write() function. Factor out a commonly used zram_decompress_page() function, which also simplifies error handling in zram_bvec_read().
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit cb1f6268acd7f1bca7153fa9ca187ffb73f60ab8
Merge: d9ff393 8f0d816
Author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Mon Oct 29 08:37:12 2012 -0700

Merge 3.7-rc3 into staging-next

This resolves the conflict with drivers/staging/comedi/drivers/amplc_dio200.c and syncs up the changes that happened in the staging directory for 3.7-rc3.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit c8f2f0db1d0294aaf37e8a85bea9bbc4aaf5c0fe
Author: Nitin Gupta <ngupta@vflare.org>
Date: Wed Oct 10 17:42:18 2012 -0700

staging: zram: Fix handling of incompressible pages

Change 130f315a ("staging: zram: remove special handle of uncompressed page") introduced a bug in the handling of incompressible pages which resulted in memory allocation failure for such pages. When a page expands on compression, say from 4K to 4K+30, we were trying to do zsmalloc(pool, 4K+30). However, the maximum size which zsmalloc can allocate is PAGE_SIZE (for obvious reasons), so such allocation requests always return failure (0).

For a page whose compressed size is larger than the original size (this may happen with already compressed or random data), there is no point storing the compressed version, as that would take more space and would also require time for decompression when needed again. So the fix is to store any page whose compressed size exceeds a threshold (max_zpage_size) as-is, i.e. without compression. Memory required for storing this uncompressed page can then be requested from zsmalloc, which supports PAGE_SIZE-sized allocations.

Lastly, the fix checks that we do not attempt to "decompress" a page which we stored in uncompressed form -- we just memcpy() out such pages.

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Reported-by: viechweg@gmail.com
Reported-by: paerley@gmail.com
Reported-by: wu.tommy@gmail.com
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 55dcbbb1bf7eef83bcd3e0ba8de0b359a45804ed
Author: Minchan Kim <minchan@kernel.org>
Date: Wed Oct 10 08:49:52 2012 +0900

staging: zram: correct obsolete comment on max_zpage_size

Zram doesn't use xv_malloc any more, so it no longer has the limitation related to zobj_header.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit b74185108668ef966e663878adbad65e03bfcb43
Author: Seth Jennings <sjenning@linux.vnet.ibm.com>
Date: Mon Jul 2 16:15:52 2012 -0500

staging: zsmalloc: add mapping modes

This patch improves mapping performance in zsmalloc by getting usage information from the user in the form of a "mapping mode" and using it to avoid unnecessary copying for objects that span pages.

Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 6e2361720b9da9ec830d407da058ca1827e62b12
Author: Seth Jennings <sjenning@linux.vnet.ibm.com>
Date: Mon Jun 25 11:14:36 2012 -0500

staging: zram/zcache: switch Kconfig dependency from X86 to ZSMALLOC

This patch switches the zcache and zram dependency to ZSMALLOC rather than X86. There is no net change since ZSMALLOC depends on X86; however, this prevents further changes to these files as zsmalloc dependencies change.
Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 80677c2538283678c7b2ef7ebbd653a3fa54d086
Author: Sam Hansen <solid.se7en@gmail.com>
Date: Thu Jun 7 16:03:48 2012 -0700

staging: zram: conventions, __aligned() attribute

Use the __aligned() attribute in favor of __attribute__((aligned(size))).

Signed-off-by: Sam Hansen <solid.se7en@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 94b8435ff4d46dde75173da45564b0d65889dc64
Author: Sam Hansen <solid.se7en@gmail.com>
Date: Thu Jun 7 16:03:47 2012 -0700

staging: zram: conventions, pr_warning -> pr_warn()

Port zram to use the pr_warn() function instead of the deprecated pr_warning().

Signed-off-by: Sam Hansen <solid.se7en@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 130f315a174d127cbb90d4d1a4a7088dbcf930b5
Author: Minchan Kim <minchan@kernel.org>
Date: Fri Jun 8 15:39:27 2012 +0900

staging: zram: remove special handle of uncompressed page

xvmalloc couldn't handle a PAGE_SIZE page, so zram had to handle it specially, but zsmalloc can, so let's remove the unnecessary special-handling code.

Quote from Nitin:
"I think page vs handle distinction was added since xvmalloc could not handle full page allocation. Now that zsmalloc allows full page allocation, we can just use it for both cases. This would also allow removing the ZRAM_UNCOMPRESSED flag. The only downside will be slightly slower code path for full page allocation but this event is anyways supposed to be rare, so should be fine."

1. This patch reduces code very much.

drivers/staging/zram/zram_drv.c | 104 +++++-------------------------
drivers/staging/zram/zram_drv.h | 17 +-----
drivers/staging/zram/zram_sysfs.c | 6 +--
3 files changed, 15 insertions(+), 112 deletions(-)

2. Change pages_expand to bad_compress so it can count the bad-compression (above 75%) ratio.

3. Remove zobj_header, which provided a back-reference for defragmentation, because it's not used at the moment, and zsmalloc can't handle sizes bigger than PAGE_SIZE, so zram can't do it any more without a redesign.

Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 374a69191d12a0525b7ffe1197abc30c4795a230
Author: Minchan Kim <minchan@kernel.org>
Date: Fri Jun 8 15:39:26 2012 +0900

staging: zram: fix random data read

fd1a30de introduced a bug: it used (struct page *) as zsmalloc's handle even though it was an uncompressed page, so it could access a random page, return random data, or even crash in get_first_page in zs_map_object.

Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit c234434835b1f4bad9bdbae6710044cba387c9e5
Author: Minchan Kim <minchan@kernel.org>
Date: Fri Jun 8 15:39:25 2012 +0900

staging: zsmalloc: use unsigned long instead of void *

We should use unsigned long as the handle instead of void * to avoid any confusion. Without this, users may just treat the zs_malloc return value as a pointer and try to dereference it.

This patch passed a compile test (zram, zcache and ramster), and zram is tested on qemu.
changelog
* from v2
- remove hval pointed out by Nitin
- based on next-20120607
* from v1
- change zcache's zv_create return value
- based on next-20120604

Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Acked-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Change-Id: I5157794e120bbae7ba1e5d0f122dae4fba4a079b
Signed-off-by: Larry Bassel <lbassel@codeaurora.org>
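
For reference, the user-visible effect of the "force disksize setting" and reset changes folded in above is roughly the following sequence (a minimal sketch based on the updated zram.txt in this patch; device names and sizes are illustrative):

	# load the module (num_devices is optional, default is 1)
	modprobe zram num_devices=4

	# disksize MUST now be written before the device can be used;
	# mem suffixes are accepted because the value is parsed with memparse()
	echo 512M > /sys/block/zram0/disksize

	# use the device, e.g. as swap
	mkswap /dev/zram0
	swapon /dev/zram0

	# reset frees all memory AND zeroes disksize, so the device cannot
	# be reused until disksize is written again
	swapoff /dev/zram0
	echo 1 > /sys/block/zram0/reset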
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/zram/Kconfig      |   2
-rw-r--r--  drivers/staging/zram/zram.txt     |  27
-rw-r--r--  drivers/staging/zram/zram_drv.c   | 514
-rw-r--r--  drivers/staging/zram/zram_drv.h   |  46
-rw-r--r--  drivers/staging/zram/zram_sysfs.c |  28
5 files changed, 245 insertions(+), 372 deletions(-)
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index be5abe8e794..983314c4134 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -14,7 +14,7 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
- Project home: http://compcache.googlecode.com/
+ Project home: <https://compcache.googlecode.com/>
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/zram.txt b/drivers/staging/zram/zram.txt
index 5f75d298756..765d790ae83 100644
--- a/drivers/staging/zram/zram.txt
+++ b/drivers/staging/zram/zram.txt
@@ -23,17 +23,17 @@ Following shows a typical sequence of steps for using zram.
This creates 4 devices: /dev/zram{0,1,2,3}
(num_devices parameter is optional. Default: 1)
-2) Set Disksize (Optional):
- Set disk size by writing the value to sysfs node 'disksize'
- (in bytes). If disksize is not given, default value of 25%
- of RAM is used.
-
- # Initialize /dev/zram0 with 50MB disksize
- echo $((50*1024*1024)) > /sys/block/zram0/disksize
-
- NOTE: disksize cannot be changed if the disk contains any
- data. So, for such a disk, you need to issue 'reset' (see below)
- before you can change its disksize.
+2) Set Disksize
+ Set disk size by writing the value to sysfs node 'disksize'.
+ The value can be either in bytes or you can use mem suffixes.
+ Examples:
+ # Initialize /dev/zram0 with 50MB disksize
+ echo $((50*1024*1024)) > /sys/block/zram0/disksize
+
+ # Using mem suffixes
+ echo 256K > /sys/block/zram0/disksize
+ echo 512M > /sys/block/zram0/disksize
+ echo 1G > /sys/block/zram0/disksize
3) Activate:
mkswap /dev/zram0
@@ -65,8 +65,9 @@ Following shows a typical sequence of steps for using zram.
echo 1 > /sys/block/zram0/reset
echo 1 > /sys/block/zram1/reset
- (This frees all the memory allocated for the given device).
-
+ This frees all the memory allocated for the given device and
+ resets the disksize to zero. You must set the disksize again
+ before reusing the device.
Please report any problems at:
- Mailing list: linux-mm-cc at laptop dot org
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index df26f6c6353..e34e3fe0ae2 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -40,17 +40,7 @@ static int zram_major;
struct zram *zram_devices;
/* Module params (documentation at end) */
-static unsigned int num_devices;
-
-static void zram_stat_inc(u32 *v)
-{
- *v = *v + 1;
-}
-
-static void zram_stat_dec(u32 *v)
-{
- *v = *v - 1;
-}
+static unsigned int num_devices = 1;
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
@@ -71,22 +61,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v)
zram_stat64_add(zram, v, 1);
}
-static int zram_test_flag(struct zram *zram, u32 index,
+static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- return zram->table[index].flags & BIT(flag);
+ return meta->table[index].flags & BIT(flag);
}
-static void zram_set_flag(struct zram *zram, u32 index,
+static void zram_set_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].flags |= BIT(flag);
+ meta->table[index].flags |= BIT(flag);
}
-static void zram_clear_flag(struct zram *zram, u32 index,
+static void zram_clear_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].flags &= ~BIT(flag);
+ meta->table[index].flags &= ~BIT(flag);
}
static int page_zero_filled(void *ptr)
@@ -104,70 +94,38 @@ static int page_zero_filled(void *ptr)
return 1;
}
-static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
-{
- if (!zram->disksize) {
- pr_info(
- "disk size not provided. You can use disksize_kb module "
- "param to specify size.\nUsing default: (%u%% of RAM).\n",
- default_disksize_perc_ram
- );
- zram->disksize = default_disksize_perc_ram *
- (totalram_bytes / 100);
- }
-
- if (zram->disksize > 2 * (totalram_bytes)) {
- pr_info(
- "There is little point creating a zram of greater than "
- "twice the size of memory since we expect a 2:1 compression "
- "ratio. Note that zram uses about 0.1%% of the size of "
- "the disk when not in use so a huge zram is "
- "wasteful.\n"
- "\tMemory Size: %zu kB\n"
- "\tSize you selected: %llu kB\n"
- "Continuing anyway ...\n",
- totalram_bytes >> 10, zram->disksize
- );
- }
-
- zram->disksize &= PAGE_MASK;
-}
-
static void zram_free_page(struct zram *zram, size_t index)
{
- void *handle = zram->table[index].handle;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
+ u16 size = meta->table[index].size;
if (unlikely(!handle)) {
/*
* No memory is allocated for zero filled pages.
* Simply clear zero page flag.
*/
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- zram_clear_flag(zram, index, ZRAM_ZERO);
- zram_stat_dec(&zram->stats.pages_zero);
+ if (zram_test_flag(meta, index, ZRAM_ZERO)) {
+ zram_clear_flag(meta, index, ZRAM_ZERO);
+ zram->stats.pages_zero--;
}
return;
}
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- __free_page(handle);
- zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
- zram_stat_dec(&zram->stats.pages_expand);
- goto out;
- }
+ if (unlikely(size > max_zpage_size))
+ zram->stats.bad_compress--;
- zs_free(zram->mem_pool, handle);
+ zs_free(meta->mem_pool, handle);
- if (zram->table[index].size <= PAGE_SIZE / 2)
- zram_stat_dec(&zram->stats.good_compress);
+ if (size <= PAGE_SIZE / 2)
+ zram->stats.good_compress--;
-out:
zram_stat64_sub(zram, &zram->stats.compr_size,
- zram->table[index].size);
- zram_stat_dec(&zram->stats.pages_stored);
+ meta->table[index].size);
+ zram->stats.pages_stored--;
- zram->table[index].handle = NULL;
- zram->table[index].size = 0;
+ meta->table[index].handle = 0;
+ meta->table[index].size = 0;
}
static void handle_zero_page(struct bio_vec *bvec)
@@ -175,25 +133,9 @@ static void handle_zero_page(struct bio_vec *bvec)
struct page *page = bvec->bv_page;
void *user_mem;
- user_mem = kmap_atomic(page, KM_USER0);
+ user_mem = kmap_atomic(page);
memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
- kunmap_atomic(user_mem, KM_USER0);
-
- flush_dcache_page(page);
-}
-
-static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
-{
- struct page *page = bvec->bv_page;
- unsigned char *user_mem, *cmem;
-
- user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
-
- memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
- kunmap_atomic(user_mem, KM_USER0);
- kunmap_atomic(cmem, KM_USER1);
+ kunmap_atomic(user_mem);
flush_dcache_page(page);
}
@@ -203,64 +145,26 @@ static inline int is_partial_io(struct bio_vec *bvec)
return bvec->bv_len != PAGE_SIZE;
}
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
- int ret;
- size_t clen;
- struct page *page;
- struct zobj_header *zheader;
- unsigned char *user_mem, *cmem, *uncmem = NULL;
-
- page = bvec->bv_page;
-
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- handle_zero_page(bvec);
- return 0;
- }
-
- /* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].handle)) {
- pr_debug("Read before write: sector=%lu, size=%u",
- (ulong)(bio->bi_sector), bio->bi_size);
- handle_zero_page(bvec);
- return 0;
- }
+ int ret = LZO_E_OK;
+ size_t clen = PAGE_SIZE;
+ unsigned char *cmem;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- handle_uncompressed_page(zram, bvec, index, offset);
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ memset(mem, 0, PAGE_SIZE);
return 0;
}
- if (is_partial_io(bvec)) {
- /* Use a temporary buffer to decompress the page */
- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!uncmem) {
- pr_info("Error allocating temp memory!\n");
- return -ENOMEM;
- }
- }
-
- user_mem = kmap_atomic(page, KM_USER0);
- if (!is_partial_io(bvec))
- uncmem = user_mem;
- clen = PAGE_SIZE;
-
- cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
-
- ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- zram->table[index].size,
- uncmem, &clen);
-
- if (is_partial_io(bvec)) {
- memcpy(user_mem + bvec->bv_offset, uncmem + offset,
- bvec->bv_len);
- kfree(uncmem);
- }
-
- zs_unmap_object(zram->mem_pool, zram->table[index].handle);
- kunmap_atomic(user_mem, KM_USER0);
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (meta->table[index].size == PAGE_SIZE)
+ memcpy(mem, cmem, PAGE_SIZE);
+ else
+ ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
+ mem, &clen);
+ zs_unmap_object(meta->mem_pool, handle);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -269,182 +173,165 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return ret;
}
- flush_dcache_page(page);
-
return 0;
}
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
{
int ret;
- size_t clen = PAGE_SIZE;
- struct zobj_header *zheader;
- unsigned char *cmem;
+ struct page *page;
+ unsigned char *user_mem, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
+ page = bvec->bv_page;
- if (zram_test_flag(zram, index, ZRAM_ZERO) ||
- !zram->table[index].handle) {
- memset(mem, 0, PAGE_SIZE);
+ if (unlikely(!meta->table[index].handle) ||
+ zram_test_flag(meta, index, ZRAM_ZERO)) {
+ handle_zero_page(bvec);
return 0;
}
- cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
+ if (is_partial_io(bvec))
+ /* Use a temporary buffer to decompress the page */
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- memcpy(mem, cmem, PAGE_SIZE);
- kunmap_atomic(cmem, KM_USER0);
- return 0;
- }
+ user_mem = kmap_atomic(page);
+ if (!is_partial_io(bvec))
+ uncmem = user_mem;
- ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- zram->table[index].size,
- mem, &clen);
- zs_unmap_object(zram->mem_pool, zram->table[index].handle);
+ if (!uncmem) {
+ pr_info("Unable to allocate temp memory\n");
+ ret = -ENOMEM;
+ goto out_cleanup;
+ }
+ ret = zram_decompress_page(zram, uncmem, index);
/* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
- return ret;
- }
+ if (unlikely(ret != LZO_E_OK))
+ goto out_cleanup;
- return 0;
+ if (is_partial_io(bvec))
+ memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+ bvec->bv_len);
+
+ flush_dcache_page(page);
+ ret = 0;
+out_cleanup:
+ kunmap_atomic(user_mem);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+ return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
- int ret;
- u32 store_offset;
+ int ret = 0;
size_t clen;
- void *handle;
- struct zobj_header *zheader;
- struct page *page, *page_store;
+ unsigned long handle;
+ struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- src = zram->compress_buffer;
+ src = meta->compress_buffer;
if (is_partial_io(bvec)) {
/*
* This is a partial IO. We need to read the full page
* before to write the changes.
*/
- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
if (!uncmem) {
- pr_info("Error allocating temp memory!\n");
ret = -ENOMEM;
goto out;
}
- ret = zram_read_before_write(zram, uncmem, index);
- if (ret) {
- kfree(uncmem);
+ ret = zram_decompress_page(zram, uncmem, index);
+ if (ret)
goto out;
- }
}
/*
* System overwrites unused sectors. Free memory associated
* with this sector now.
*/
- if (zram->table[index].handle ||
- zram_test_flag(zram, index, ZRAM_ZERO))
+ if (meta->table[index].handle ||
+ zram_test_flag(meta, index, ZRAM_ZERO))
zram_free_page(zram, index);
- user_mem = kmap_atomic(page, KM_USER0);
+ user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
+ if (is_partial_io(bvec)) {
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
bvec->bv_len);
- else
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ } else {
uncmem = user_mem;
+ }
if (page_zero_filled(uncmem)) {
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(user_mem);
if (is_partial_io(bvec))
kfree(uncmem);
- zram_stat_inc(&zram->stats.pages_zero);
- zram_set_flag(zram, index, ZRAM_ZERO);
+ zram->stats.pages_zero++;
+ zram_set_flag(meta, index, ZRAM_ZERO);
ret = 0;
goto out;
}
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
- zram->compress_workmem);
+ meta->compress_workmem);
- kunmap_atomic(user_mem, KM_USER0);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ if (!is_partial_io(bvec)) {
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ uncmem = NULL;
+ }
if (unlikely(ret != LZO_E_OK)) {
pr_err("Compression failed! err=%d\n", ret);
goto out;
}
- /*
- * Page is incompressible. Store it as-is (uncompressed)
- * since we do not want to return too many disk write
- * errors which has side effect of hanging the system.
- */
if (unlikely(clen > max_zpage_size)) {
+ zram->stats.bad_compress++;
clen = PAGE_SIZE;
- page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page_store)) {
- pr_info("Error allocating memory for "
- "incompressible page: %u\n", index);
- ret = -ENOMEM;
- goto out;
- }
-
- store_offset = 0;
- zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
- zram_stat_inc(&zram->stats.pages_expand);
- handle = page_store;
- src = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(page_store, KM_USER1);
- goto memstore;
+ src = NULL;
+ if (is_partial_io(bvec))
+ src = uncmem;
}
- handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+ handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
ret = -ENOMEM;
goto out;
}
- cmem = zs_map_object(zram->mem_pool, handle);
-
-memstore:
-#if 0
- /* Back-reference needed for memory defragmentation */
- if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
- zheader = (struct zobj_header *)cmem;
- zheader->table_idx = index;
- cmem += sizeof(*zheader);
- }
-#endif
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ src = kmap_atomic(page);
memcpy(cmem, src, clen);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ kunmap_atomic(src);
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- kunmap_atomic(cmem, KM_USER1);
- kunmap_atomic(src, KM_USER0);
- } else {
- zs_unmap_object(zram->mem_pool, handle);
- }
+ zs_unmap_object(meta->mem_pool, handle);
- zram->table[index].handle = handle;
- zram->table[index].size = clen;
+ meta->table[index].handle = handle;
+ meta->table[index].size = clen;
/* Update stats */
zram_stat64_add(zram, &zram->stats.compr_size, clen);
- zram_stat_inc(&zram->stats.pages_stored);
+ zram->stats.pages_stored++;
if (clen <= PAGE_SIZE / 2)
- zram_stat_inc(&zram->stats.good_compress);
-
- return 0;
+ zram->stats.good_compress++;
out:
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+
if (ret)
zram_stat64_inc(zram, &zram->stats.failed_writes);
return ret;
@@ -550,69 +437,56 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
-static int zram_make_request(struct request_queue *queue, struct bio *bio)
+static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
- if (unlikely(!zram->init_done) && zram_init_device(zram))
- goto error;
-
down_read(&zram->init_lock);
if (unlikely(!zram->init_done))
- goto error_unlock;
+ goto error;
if (!valid_io_request(zram, bio)) {
zram_stat64_inc(zram, &zram->stats.invalid_io);
- goto error_unlock;
+ goto error;
}
__zram_make_request(zram, bio, bio_data_dir(bio));
up_read(&zram->init_lock);
- return 0;
+ return;
-error_unlock:
- up_read(&zram->init_lock);
error:
+ up_read(&zram->init_lock);
bio_io_error(bio);
- return 0;
}
-void __zram_reset_device(struct zram *zram)
+static void __zram_reset_device(struct zram *zram)
{
size_t index;
+ struct zram_meta *meta;
- zram->init_done = 0;
-
- /* Free various per-device buffers */
- kfree(zram->compress_workmem);
- free_pages((unsigned long)zram->compress_buffer, 1);
+ if (!zram->init_done)
+ return;
- zram->compress_workmem = NULL;
- zram->compress_buffer = NULL;
+ meta = zram->meta;
+ zram->init_done = 0;
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- void *handle = zram->table[index].handle;
+ unsigned long handle = meta->table[index].handle;
if (!handle)
continue;
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- __free_page(handle);
- else
- zs_free(zram->mem_pool, handle);
+ zs_free(meta->mem_pool, handle);
}
- vfree(zram->table);
- zram->table = NULL;
-
- zs_destroy_pool(zram->mem_pool);
- zram->mem_pool = NULL;
-
+ zram_meta_free(zram->meta);
+ zram->meta = NULL;
/* Reset stats */
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
+ set_capacity(zram->disk, 0);
}
void zram_reset_device(struct zram *zram)
@@ -622,69 +496,84 @@ void zram_reset_device(struct zram *zram)
up_write(&zram->init_lock);
}
-int zram_init_device(struct zram *zram)
+void zram_meta_free(struct zram_meta *meta)
{
- int ret;
- size_t num_pages;
-
- down_write(&zram->init_lock);
+ zs_destroy_pool(meta->mem_pool);
+ kfree(meta->compress_workmem);
+ free_pages((unsigned long)meta->compress_buffer, 1);
+ vfree(meta->table);
+ kfree(meta);
+}
- if (zram->init_done) {
- up_write(&zram->init_lock);
- return 0;
- }
+struct zram_meta *zram_meta_alloc(u64 disksize)
+{
+ size_t num_pages;
+ struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ goto out;
- zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
+ meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!meta->compress_workmem)
+ goto free_meta;
- zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!zram->compress_workmem) {
- pr_err("Error allocating compressor working memory!\n");
- ret = -ENOMEM;
- goto fail_no_table;
- }
-
- zram->compress_buffer =
+ meta->compress_buffer =
(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!zram->compress_buffer) {
+ if (!meta->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
- ret = -ENOMEM;
- goto fail_no_table;
+ goto free_workmem;
}
- num_pages = zram->disksize >> PAGE_SHIFT;
- zram->table = vzalloc(num_pages * sizeof(*zram->table));
- if (!zram->table) {
+ num_pages = disksize >> PAGE_SHIFT;
+ meta->table = vzalloc(num_pages * sizeof(*meta->table));
+ if (!meta->table) {
pr_err("Error allocating zram address table\n");
- ret = -ENOMEM;
- goto fail_no_table;
+ goto free_buffer;
}
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+ if (!meta->mem_pool) {
+ pr_err("Error creating memory pool\n");
+ goto free_table;
+ }
- /* zram devices sort of resembles non-rotational disks */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+ return meta;
- zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
- if (!zram->mem_pool) {
- pr_err("Error creating memory pool\n");
- ret = -ENOMEM;
- goto fail;
+free_table:
+ vfree(meta->table);
+free_buffer:
+ free_pages((unsigned long)meta->compress_buffer, 1);
+free_workmem:
+ kfree(meta->compress_workmem);
+free_meta:
+ kfree(meta);
+ meta = NULL;
+out:
+ return meta;
+}
+
+void zram_init_device(struct zram *zram, struct zram_meta *meta)
+{
+ if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
+ pr_info(
+ "There is little point creating a zram of greater than "
+ "twice the size of memory since we expect a 2:1 compression "
+ "ratio. Note that zram uses about 0.1%% of the size of "
+ "the disk when not in use so a huge zram is "
+ "wasteful.\n"
+ "\tMemory Size: %lu kB\n"
+ "\tSize you selected: %llu kB\n"
+ "Continuing anyway ...\n",
+ (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
+ );
}
+ /* zram devices sort of resembles non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+
+ zram->meta = meta;
zram->init_done = 1;
- up_write(&zram->init_lock);
pr_debug("Initialization done!\n");
- return 0;
-
-fail_no_table:
- /* To prevent accessing table entries during cleanup */
- zram->disksize = 0;
-fail:
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
- pr_err("Initialization failed: err=%d\n", ret);
- return ret;
}
static void zram_slot_free_notify(struct block_device *bdev,
@@ -725,7 +614,7 @@ static int create_device(struct zram *zram, int device_id)
zram->disk = alloc_disk(1);
if (!zram->disk) {
blk_cleanup_queue(zram->queue);
- pr_warning("Error allocating disk structure for device %d\n",
+ pr_warn("Error allocating disk structure for device %d\n",
device_id);
ret = -ENOMEM;
goto out;
@@ -756,7 +645,7 @@ static int create_device(struct zram *zram, int device_id)
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
&zram_disk_attr_group);
if (ret < 0) {
- pr_warning("Error creating sysfs group");
+ pr_warn("Error creating sysfs group");
goto out;
}
@@ -790,7 +679,7 @@ static int __init zram_init(void)
int ret, dev_id;
if (num_devices > max_num_devices) {
- pr_warning("Invalid value for num_devices: %u\n",
+ pr_warn("Invalid value for num_devices: %u\n",
num_devices);
ret = -EINVAL;
goto out;
@@ -798,18 +687,12 @@ static int __init zram_init(void)
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
- pr_warning("Unable to get major number\n");
+ pr_warn("Unable to get major number\n");
ret = -EBUSY;
goto out;
}
- if (!num_devices) {
- pr_info("num_devices not specified. Using default: 1\n");
- num_devices = 1;
- }
-
/* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", num_devices);
zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
ret = -ENOMEM;
@@ -822,6 +705,8 @@ static int __init zram_init(void)
goto free_devices;
}
+ pr_info("Created %u device(s) ...\n", num_devices);
+
return 0;
free_devices:
@@ -843,8 +728,7 @@ static void __exit zram_exit(void)
zram = &zram_devices[i];
destroy_device(zram);
- if (zram->init_done)
- zram_reset_device(zram);
+ zram_reset_device(zram);
}
unregister_blkdev(zram_major, "zram");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 880f01388ed..2d1a3f1e8ed 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -26,23 +26,8 @@
*/
static const unsigned max_num_devices = 32;
-/*
- * Stored at beginning of each compressed object.
- *
- * It stores back-reference to table entry which points to this
- * object. This is required to support memory defragmentation.
- */
-struct zobj_header {
-#if 0
- u32 table_idx;
-#endif
-};
-
/*-- Configurable parameters */
-/* Default zram disk size: 25% of total RAM */
-static const unsigned default_disksize_perc_ram = 25;
-
/*
* Pages that compress to size greater than this are stored
* uncompressed in memory.
@@ -51,8 +36,8 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*
* NOTE: max_zpage_size must be less than or equal to:
- * ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
- * otherwise, xv_malloc() would always return failure.
+ * ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
+ * always return failure.
*/
/*-- End of configurable params */
@@ -68,9 +53,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
- /* Page is stored uncompressed */
- ZRAM_UNCOMPRESSED,
-
/* Page consists entirely of zeros */
ZRAM_ZERO,
@@ -81,11 +63,11 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct table {
- void *handle;
+ unsigned long handle;
u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
-} __attribute__((aligned(4)));
+} __aligned(4);
struct zram_stats {
u64 compr_size; /* compressed size of pages stored */
@@ -98,17 +80,21 @@ struct zram_stats {
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* % of pages with compression ratio<=50% */
- u32 pages_expand; /* % of incompressible pages */
+ u32 bad_compress; /* % of pages with compression ratio>=75% */
};
-struct zram {
- struct zs_pool *mem_pool;
+struct zram_meta {
void *compress_workmem;
void *compress_buffer;
struct table *table;
+ struct zs_pool *mem_pool;
+};
+
+struct zram {
+ struct zram_meta *meta;
spinlock_t stat64_lock; /* protect 64-bit stats */
- struct rw_semaphore lock; /* protect compression buffers against
- * concurrent writes */
+ struct rw_semaphore lock; /* protect compression buffers and table
+ * against concurrent read and writes */
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -129,7 +115,9 @@ unsigned int zram_get_num_devices(void);
extern struct attribute_group zram_disk_attr_group;
#endif
-extern int zram_init_device(struct zram *zram);
-extern void __zram_reset_device(struct zram *zram);
+extern void zram_reset_device(struct zram *zram);
+extern struct zram_meta *zram_meta_alloc(u64 disksize);
+extern void zram_meta_free(struct zram_meta *meta);
+extern void zram_init_device(struct zram *zram, struct zram_meta *meta);
#endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index 5142d074de8..68ccad121f3 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/mm.h>
+#include <linux/kernel.h>
#include "zram_drv.h"
@@ -54,23 +55,27 @@ static ssize_t disksize_show(struct device *dev,
static ssize_t disksize_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- int ret;
u64 disksize;
+ struct zram_meta *meta;
struct zram *zram = dev_to_zram(dev);
- ret = kstrtoull(buf, 10, &disksize);
- if (ret)
- return ret;
+ disksize = memparse(buf, NULL);
+ if (!disksize)
+ return -EINVAL;
+ disksize = PAGE_ALIGN(disksize);
+ meta = zram_meta_alloc(disksize);
down_write(&zram->init_lock);
if (zram->init_done) {
up_write(&zram->init_lock);
+ zram_meta_free(meta);
pr_info("Cannot change disksize for initialized device\n");
return -EBUSY;
}
- zram->disksize = PAGE_ALIGN(disksize);
+ zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ zram_init_device(zram, meta);
up_write(&zram->init_lock);
return len;
@@ -117,11 +122,7 @@ static ssize_t reset_store(struct device *dev,
if (bdev)
fsync_bdev(bdev);
- down_write(&zram->init_lock);
- if (zram->init_done)
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
-
+ zram_reset_device(zram);
return len;
}
@@ -192,11 +193,10 @@ static ssize_t mem_used_total_show(struct device *dev,
{
u64 val = 0;
struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta = zram->meta;
- if (zram->init_done) {
- val = zs_get_total_size_bytes(zram->mem_pool) +
- ((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
- }
+ if (zram->init_done)
+ val = zs_get_total_size_bytes(meta->mem_pool);
return sprintf(buf, "%llu\n", val);
}