| author | Sergey Senozhatsky <sergey.senozhatsky@gmail.com> | 2012-10-30 22:40:23 +0300 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-10-31 14:49:09 -0700 |
| commit | 37b51fdddf64e7ba0971d070428655f8d6f36578 | |
| tree | e43b30fd9195b901df7b0a17cffe82789f0e066d /drivers/staging/zram | |
| parent | e446f5a8540a845ed92fca4109e67c8f0c76031f | |
staging: zram: factor-out zram_decompress_page() function
zram_bvec_read() shared decompression functionality with the zram_read_before_write() function.
Factor out a common zram_decompress_page() function, which also simplifies error handling
in zram_bvec_read().
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging/zram')
-rw-r--r-- | drivers/staging/zram/zram_drv.c | 113 |
1 file changed, 48 insertions(+), 65 deletions(-)
```diff
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 6edefde2372..fb4a7c94aed 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret;
-	size_t clen;
-	struct page *page;
-	unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-	page = bvec->bv_page;
-
-	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-		handle_zero_page(bvec);
-		return 0;
-	}
+	int ret = LZO_E_OK;
+	size_t clen = PAGE_SIZE;
+	unsigned char *cmem;
+	unsigned long handle = zram->table[index].handle;
 
-	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].handle)) {
-		pr_debug("Read before write: sector=%lu, size=%u",
-			 (ulong)(bio->bi_sector), bio->bi_size);
-		handle_zero_page(bvec);
+	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	if (is_partial_io(bvec)) {
-		/* Use a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!uncmem) {
-			pr_info("Error allocating temp memory!\n");
-			return -ENOMEM;
-		}
-	}
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-	clen = PAGE_SIZE;
-
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-			     ZS_MM_RO);
-
-	if (zram->table[index].size == PAGE_SIZE) {
-		memcpy(uncmem, cmem, PAGE_SIZE);
-		ret = LZO_E_OK;
-	} else {
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	if (zram->table[index].size == PAGE_SIZE)
+		memcpy(mem, cmem, PAGE_SIZE);
+	else
 		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-					    uncmem, &clen);
-	}
-
-	if (is_partial_io(bvec)) {
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-		       bvec->bv_len);
-		kfree(uncmem);
-	}
-
-	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem);
+					    mem, &clen);
+	zs_unmap_object(zram->mem_pool, handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -247,36 +210,56 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		return ret;
 	}
 
-	flush_dcache_page(page);
-
 	return 0;
 }
 
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, int offset, struct bio *bio)
 {
 	int ret;
-	size_t clen = PAGE_SIZE;
-	unsigned char *cmem;
-	unsigned long handle = zram->table[index].handle;
+	struct page *page;
+	unsigned char *user_mem, *uncmem = NULL;
 
-	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
-		memset(mem, 0, PAGE_SIZE);
+	page = bvec->bv_page;
+
+	if (unlikely(!zram->table[index].handle) ||
+	    zram_test_flag(zram, index, ZRAM_ZERO)) {
+		handle_zero_page(bvec);
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    mem, &clen);
-	zs_unmap_object(zram->mem_pool, handle);
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		/* Use a temporary buffer to decompress the page */
+		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	else
+		uncmem = user_mem;
 
+	if (!uncmem) {
+		pr_info("Unable to allocate temp memory\n");
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = zram_decompress_page(zram, uncmem, index);
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		zram_stat64_inc(zram, &zram->stats.failed_reads);
-		return ret;
+		goto out_cleanup;
 	}
 
-	return 0;
+	if (is_partial_io(bvec))
+		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+		       bvec->bv_len);
+
+	flush_dcache_page(page);
+	ret = 0;
+out_cleanup:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+	return ret;
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
@@ -302,7 +285,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		ret = -ENOMEM;
 		goto out;
 	}
-	ret = zram_read_before_write(zram, uncmem, index);
+	ret = zram_decompress_page(zram, uncmem, index);
 	if (ret) {
 		kfree(uncmem);
 		goto out;
```
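The sketch below is a minimal userspace illustration of the refactoring pattern in this patch, not the driver code itself: one page-decompress helper that zero-fills never-written slots, shared by a full-page read path and a partial read path that stages through a temporary buffer. All names (demo_*) are hypothetical, DEMO_PAGE_SIZE is hard-coded, and a plain memcpy() stands in for lzo1x_decompress_safe() and the zsmalloc mapping calls.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

/* One logical page slot: NULL data means the page was never written. */
struct demo_slot {
	unsigned char *data;	/* stored (possibly compressed) bytes */
	size_t size;		/* stored length */
};

/* Shared helper, analogous to zram_decompress_page(): always produces a full
 * page in 'mem', zero-filling slots that were never written. */
static int demo_decompress_page(const struct demo_slot *slot, unsigned char *mem)
{
	if (!slot->data) {
		memset(mem, 0, DEMO_PAGE_SIZE);
		return 0;
	}
	if (slot->size == DEMO_PAGE_SIZE) {
		/* Incompressible pages are stored as-is, so a copy suffices. */
		memcpy(mem, slot->data, DEMO_PAGE_SIZE);
		return 0;
	}
	/* The real driver would call lzo1x_decompress_safe() here; this sketch
	 * carries no compressor, so short slots are simply rejected. */
	return -1;
}

/* Read path, analogous to zram_bvec_read(): full-page reads decompress
 * straight into the caller's buffer, partial reads stage through a
 * temporary page and copy out only the requested range. */
static int demo_read(const struct demo_slot *slot, unsigned char *out,
		     size_t offset, size_t len)
{
	unsigned char *page = out;
	int ret;

	if (len != DEMO_PAGE_SIZE) {
		page = malloc(DEMO_PAGE_SIZE);
		if (!page)
			return -1;
	}

	ret = demo_decompress_page(slot, page);
	if (ret == 0 && len != DEMO_PAGE_SIZE)
		memcpy(out, page + offset, len);

	if (page != out)
		free(page);
	return ret;
}

int main(void)
{
	struct demo_slot never_written = { .data = NULL, .size = 0 };
	unsigned char buf[16];

	/* A partial read of a never-written slot comes back zero-filled,
	 * mirroring the old "read before write" handling. */
	if (demo_read(&never_written, buf, 128, sizeof(buf)) == 0)
		printf("first byte: %d\n", buf[0]);
	return 0;
}
```

The point of the shared helper, as in the patch, is that both callers get the same zero-fill-on-unwritten behaviour and a single error path instead of duplicating that logic.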