Commit 8b3cc3ed authored by Minchan Kim, committed by Greg Kroah-Hartman

zram: get rid of lockdep warning

Lockdep complains about a recursive deadlock on zram->init_lock.
Since [1], this is a false positive: no I/O can be submitted to a
zram device before its disksize has been set. Still, we should
silence lockdep to avoid a flood of spurious reports from users.

[1]: zram: force disksize setting before using zram
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 152bce6b
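The heart of the change is easiest to see in disksize_store() below: zram_meta_alloc() now does all of the allocation work (compressor workmem, compress buffer, table, zsmalloc pool) before init_lock is taken, and zram_init_device() merely publishes the pre-built meta while holding the lock, so lockdep no longer sees allocations nested inside init_lock. Condensed from the hunks that follow (every identifier here comes from the diff itself), the resulting flow is roughly:

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	/* All allocations happen here, outside init_lock. */
	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);

	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);	/* device already initialized */
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	/* Under init_lock we only publish the pre-built meta. */
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}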
@@ -61,22 +61,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v)
 	zram_stat64_add(zram, v, 1);
 }
 
-static int zram_test_flag(struct zram *zram, u32 index,
+static int zram_test_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	return zram->table[index].flags & BIT(flag);
+	return meta->table[index].flags & BIT(flag);
 }
 
-static void zram_set_flag(struct zram *zram, u32 index,
+static void zram_set_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	zram->table[index].flags |= BIT(flag);
+	meta->table[index].flags |= BIT(flag);
 }
 
-static void zram_clear_flag(struct zram *zram, u32 index,
+static void zram_clear_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	zram->table[index].flags &= ~BIT(flag);
+	meta->table[index].flags &= ~BIT(flag);
 }
 
 static int page_zero_filled(void *ptr)
@@ -96,16 +96,17 @@ static int page_zero_filled(void *ptr)
 
 static void zram_free_page(struct zram *zram, size_t index)
 {
-	unsigned long handle = zram->table[index].handle;
-	u16 size = zram->table[index].size;
+	struct zram_meta *meta = zram->meta;
+	unsigned long handle = meta->table[index].handle;
+	u16 size = meta->table[index].size;
 
 	if (unlikely(!handle)) {
 		/*
 		 * No memory is allocated for zero filled pages.
 		 * Simply clear zero page flag.
 		 */
-		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-			zram_clear_flag(zram, index, ZRAM_ZERO);
+		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
+			zram_clear_flag(meta, index, ZRAM_ZERO);
 			zram->stats.pages_zero--;
 		}
 		return;
@@ -114,17 +115,17 @@ static void zram_free_page(struct zram *zram, size_t index)
 	if (unlikely(size > max_zpage_size))
 		zram->stats.bad_compress--;
 
-	zs_free(zram->mem_pool, handle);
+	zs_free(meta->mem_pool, handle);
 
 	if (size <= PAGE_SIZE / 2)
 		zram->stats.good_compress--;
 
 	zram_stat64_sub(zram, &zram->stats.compr_size,
-			zram->table[index].size);
+			meta->table[index].size);
 	zram->stats.pages_stored--;
 
-	zram->table[index].handle = 0;
-	zram->table[index].size = 0;
+	meta->table[index].handle = 0;
+	meta->table[index].size = 0;
 }
 
 static void handle_zero_page(struct bio_vec *bvec)
@@ -149,20 +150,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	int ret = LZO_E_OK;
 	size_t clen = PAGE_SIZE;
 	unsigned char *cmem;
-	unsigned long handle = zram->table[index].handle;
+	struct zram_meta *meta = zram->meta;
+	unsigned long handle = meta->table[index].handle;
 
-	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
 		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-	if (zram->table[index].size == PAGE_SIZE)
+	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+	if (meta->table[index].size == PAGE_SIZE)
 		memcpy(mem, cmem, PAGE_SIZE);
 	else
-		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
+		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
 						mem, &clen);
-	zs_unmap_object(zram->mem_pool, handle);
+	zs_unmap_object(meta->mem_pool, handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -180,11 +182,11 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	int ret;
 	struct page *page;
 	unsigned char *user_mem, *uncmem = NULL;
+	struct zram_meta *meta = zram->meta;
 
 	page = bvec->bv_page;
-	if (unlikely(!zram->table[index].handle) ||
-			zram_test_flag(zram, index, ZRAM_ZERO)) {
+	if (unlikely(!meta->table[index].handle) ||
+			zram_test_flag(meta, index, ZRAM_ZERO)) {
 		handle_zero_page(bvec);
 		return 0;
 	}
@@ -232,9 +234,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	unsigned long handle;
 	struct page *page;
 	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+	struct zram_meta *meta = zram->meta;
 
 	page = bvec->bv_page;
-	src = zram->compress_buffer;
+	src = meta->compress_buffer;
 
 	if (is_partial_io(bvec)) {
 		/*
@@ -256,8 +259,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * System overwrites unused sectors. Free memory associated
 	 * with this sector now.
 	 */
-	if (zram->table[index].handle ||
-			zram_test_flag(zram, index, ZRAM_ZERO))
+	if (meta->table[index].handle ||
+			zram_test_flag(meta, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
 
 	user_mem = kmap_atomic(page);
@@ -276,13 +279,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram->stats.pages_zero++;
-		zram_set_flag(zram, index, ZRAM_ZERO);
+		zram_set_flag(meta, index, ZRAM_ZERO);
 		ret = 0;
 		goto out;
 	}
 
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
-			       zram->compress_workmem);
+			       meta->compress_workmem);
 
 	if (!is_partial_io(bvec)) {
 		kunmap_atomic(user_mem);
@@ -303,14 +306,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		src = uncmem;
 	}
 
-	handle = zs_malloc(zram->mem_pool, clen);
+	handle = zs_malloc(meta->mem_pool, clen);
 	if (!handle) {
 		pr_info("Error allocating memory for compressed "
 			"page: %u, size=%zu\n", index, clen);
 		ret = -ENOMEM;
 		goto out;
 	}
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
 		src = kmap_atomic(page);
@@ -318,10 +321,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
 		kunmap_atomic(src);
 
-	zs_unmap_object(zram->mem_pool, handle);
+	zs_unmap_object(meta->mem_pool, handle);
 
-	zram->table[index].handle = handle;
-	zram->table[index].size = clen;
+	meta->table[index].handle = handle;
+	meta->table[index].size = clen;
 
 	/* Update stats */
 	zram_stat64_add(zram, &zram->stats.compr_size, clen);
@@ -464,34 +467,25 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 void __zram_reset_device(struct zram *zram)
 {
 	size_t index;
+	struct zram_meta *meta;
 
 	if (!zram->init_done)
 		return;
 
+	meta = zram->meta;
 	zram->init_done = 0;
 
-	/* Free various per-device buffers */
-	kfree(zram->compress_workmem);
-	free_pages((unsigned long)zram->compress_buffer, 1);
-
-	zram->compress_workmem = NULL;
-	zram->compress_buffer = NULL;
-
 	/* Free all pages that are still in this zram device */
 	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
-		unsigned long handle = zram->table[index].handle;
+		unsigned long handle = meta->table[index].handle;
 		if (!handle)
 			continue;
 
-		zs_free(zram->mem_pool, handle);
+		zs_free(meta->mem_pool, handle);
 	}
 
-	vfree(zram->table);
-	zram->table = NULL;
-
-	zs_destroy_pool(zram->mem_pool);
-	zram->mem_pool = NULL;
+	zram_meta_free(zram->meta);
+	zram->meta = NULL;
 
 	/* Reset stats */
 	memset(&zram->stats, 0, sizeof(zram->stats));
@@ -506,12 +500,65 @@ void zram_reset_device(struct zram *zram)
 	up_write(&zram->init_lock);
 }
 
-/* zram->init_lock should be held */
-int zram_init_device(struct zram *zram)
+void zram_meta_free(struct zram_meta *meta)
+{
+	zs_destroy_pool(meta->mem_pool);
+	kfree(meta->compress_workmem);
+	free_pages((unsigned long)meta->compress_buffer, 1);
+	vfree(meta->table);
+	kfree(meta);
+}
+
+struct zram_meta *zram_meta_alloc(u64 disksize)
 {
-	int ret;
 	size_t num_pages;
+	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+	if (!meta)
+		goto out;
 
+	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+	if (!meta->compress_workmem) {
+		pr_err("Error allocating compressor working memory!\n");
+		goto free_meta;
+	}
+
+	meta->compress_buffer =
+		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+	if (!meta->compress_buffer) {
+		pr_err("Error allocating compressor buffer space\n");
+		goto free_workmem;
+	}
+
+	num_pages = disksize >> PAGE_SHIFT;
+	meta->table = vzalloc(num_pages * sizeof(*meta->table));
+	if (!meta->table) {
+		pr_err("Error allocating zram address table\n");
+		goto free_buffer;
+	}
+
+	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+	if (!meta->mem_pool) {
+		pr_err("Error creating memory pool\n");
+		goto free_table;
+	}
+
+	return meta;
+
+free_table:
+	vfree(meta->table);
+free_buffer:
+	free_pages((unsigned long)meta->compress_buffer, 1);
+free_workmem:
+	kfree(meta->compress_workmem);
+free_meta:
+	kfree(meta);
+	meta = NULL;
+out:
+	return meta;
+}
+
+void zram_init_device(struct zram *zram, struct zram_meta *meta)
+{
 	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
 		pr_info(
 		"There is little point creating a zram of greater than "
@@ -526,51 +573,13 @@ int zram_init_device(struct zram *zram)
 		);
 	}
 
-	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
-	if (!zram->compress_workmem) {
-		pr_err("Error allocating compressor working memory!\n");
-		ret = -ENOMEM;
-		goto fail_no_table;
-	}
-
-	zram->compress_buffer =
-		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
-	if (!zram->compress_buffer) {
-		pr_err("Error allocating compressor buffer space\n");
-		ret = -ENOMEM;
-		goto fail_no_table;
-	}
-
-	num_pages = zram->disksize >> PAGE_SHIFT;
-	zram->table = vzalloc(num_pages * sizeof(*zram->table));
-	if (!zram->table) {
-		pr_err("Error allocating zram address table\n");
-		ret = -ENOMEM;
-		goto fail_no_table;
-	}
-
 	/* zram devices sort of resembles non-rotational disks */
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
 
-	zram->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
-	if (!zram->mem_pool) {
-		pr_err("Error creating memory pool\n");
-		ret = -ENOMEM;
-		goto fail;
-	}
-
+	zram->meta = meta;
 	zram->init_done = 1;
 
 	pr_debug("Initialization done!\n");
-	return 0;
-
-fail_no_table:
-	/* To prevent accessing table entries during cleanup */
-	zram->disksize = 0;
-fail:
-	__zram_reset_device(zram);
-	pr_err("Initialization failed: err=%d\n", ret);
-	return ret;
 }
 
 static void zram_slot_free_notify(struct block_device *bdev,
@@ -83,11 +83,15 @@ struct zram_stats {
 	u32 bad_compress;	/* % of pages with compression ratio>=75% */
 };
 
-struct zram {
-	struct zs_pool *mem_pool;
+struct zram_meta {
 	void *compress_workmem;
 	void *compress_buffer;
 	struct table *table;
+	struct zs_pool *mem_pool;
+};
+
+struct zram {
+	struct zram_meta *meta;
 	spinlock_t stat64_lock;	/* protect 64-bit stats */
 	struct rw_semaphore lock; /* protect compression buffers and table
 				   * against concurrent read and writes */
@@ -111,7 +115,9 @@ unsigned int zram_get_num_devices(void);
 extern struct attribute_group zram_disk_attr_group;
 #endif
 
-extern int zram_init_device(struct zram *zram);
 extern void zram_reset_device(struct zram *zram);
+extern struct zram_meta *zram_meta_alloc(u64 disksize);
+extern void zram_meta_free(struct zram_meta *meta);
+extern void zram_init_device(struct zram *zram, struct zram_meta *meta);
 
 #endif
@@ -56,22 +56,26 @@ static ssize_t disksize_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
 	u64 disksize;
+	struct zram_meta *meta;
 	struct zram *zram = dev_to_zram(dev);
 
 	disksize = memparse(buf, NULL);
 	if (!disksize)
 		return -EINVAL;
 
+	disksize = PAGE_ALIGN(disksize);
+	meta = zram_meta_alloc(disksize);
 	down_write(&zram->init_lock);
 	if (zram->init_done) {
 		up_write(&zram->init_lock);
+		zram_meta_free(meta);
 		pr_info("Cannot change disksize for initialized device\n");
 		return -EBUSY;
 	}
 
-	zram->disksize = PAGE_ALIGN(disksize);
+	zram->disksize = disksize;
 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-	zram_init_device(zram);
+	zram_init_device(zram, meta);
 	up_write(&zram->init_lock);
 
 	return len;
@@ -182,9 +186,10 @@ static ssize_t mem_used_total_show(struct device *dev,
 {
 	u64 val = 0;
 	struct zram *zram = dev_to_zram(dev);
+	struct zram_meta *meta = zram->meta;
 
 	if (zram->init_done)
-		val = zs_get_total_size_bytes(zram->mem_pool);
+		val = zs_get_total_size_bytes(meta->mem_pool);
 
 	return sprintf(buf, "%llu\n", val);
 }