Commit d0d8da2d authored by Sergey Senozhatsky, committed by Linus Torvalds

zsmalloc: require GFP in zs_malloc()

Pass GFP flags to zs_malloc() instead of using a fixed mask supplied to
zs_create_pool(), so we can be more flexible, but, more importantly, we
need this to switch zram to per-cpu compression streams -- zram will try
to allocate handle with preemption disabled in a fast path and switch to
a slow path (using different gfp mask) if the fast one has failed.

Apart from that, this also aligns the zs_malloc() interface with zspool/zbud.

[sergey.senozhatsky@gmail.com: pass GFP flags to zs_malloc() instead of using a fixed mask]
  Link: http://lkml.kernel.org/r/20160429150942.GA637@swordfish
Link: http://lkml.kernel.org/r/20160429150942.GA637@swordfish
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1ee47165
...@@ -514,7 +514,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize) ...@@ -514,7 +514,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
goto out_error; goto out_error;
} }
meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); meta->mem_pool = zs_create_pool(pool_name);
if (!meta->mem_pool) { if (!meta->mem_pool) {
pr_err("Error creating memory pool\n"); pr_err("Error creating memory pool\n");
goto out_error; goto out_error;
...@@ -717,7 +717,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, ...@@ -717,7 +717,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
src = uncmem; src = uncmem;
} }
handle = zs_malloc(meta->mem_pool, clen); handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM);
if (!handle) { if (!handle) {
pr_err("Error allocating memory for compressed page: %u, size=%zu\n", pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
index, clen); index, clen);
......
...@@ -41,10 +41,10 @@ struct zs_pool_stats { ...@@ -41,10 +41,10 @@ struct zs_pool_stats {
struct zs_pool; struct zs_pool;
struct zs_pool *zs_create_pool(const char *name, gfp_t flags); struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool); void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size); unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj); void zs_free(struct zs_pool *pool, unsigned long obj);
void *zs_map_object(struct zs_pool *pool, unsigned long handle, void *zs_map_object(struct zs_pool *pool, unsigned long handle,
......
...@@ -247,7 +247,6 @@ struct zs_pool { ...@@ -247,7 +247,6 @@ struct zs_pool {
struct size_class **size_class; struct size_class **size_class;
struct kmem_cache *handle_cachep; struct kmem_cache *handle_cachep;
gfp_t flags; /* allocation flags used when growing pool */
atomic_long_t pages_allocated; atomic_long_t pages_allocated;
struct zs_pool_stats stats; struct zs_pool_stats stats;
...@@ -295,10 +294,10 @@ static void destroy_handle_cache(struct zs_pool *pool) ...@@ -295,10 +294,10 @@ static void destroy_handle_cache(struct zs_pool *pool)
kmem_cache_destroy(pool->handle_cachep); kmem_cache_destroy(pool->handle_cachep);
} }
static unsigned long alloc_handle(struct zs_pool *pool) static unsigned long alloc_handle(struct zs_pool *pool, gfp_t gfp)
{ {
return (unsigned long)kmem_cache_alloc(pool->handle_cachep, return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
pool->flags & ~__GFP_HIGHMEM); gfp & ~__GFP_HIGHMEM);
} }
static void free_handle(struct zs_pool *pool, unsigned long handle) static void free_handle(struct zs_pool *pool, unsigned long handle)
...@@ -324,7 +323,12 @@ static void *zs_zpool_create(const char *name, gfp_t gfp, ...@@ -324,7 +323,12 @@ static void *zs_zpool_create(const char *name, gfp_t gfp,
const struct zpool_ops *zpool_ops, const struct zpool_ops *zpool_ops,
struct zpool *zpool) struct zpool *zpool)
{ {
return zs_create_pool(name, gfp); /*
* Ignore global gfp flags: zs_malloc() may be invoked from
* different contexts and its caller must provide a valid
* gfp mask.
*/
return zs_create_pool(name);
} }
static void zs_zpool_destroy(void *pool) static void zs_zpool_destroy(void *pool)
...@@ -335,7 +339,7 @@ static void zs_zpool_destroy(void *pool) ...@@ -335,7 +339,7 @@ static void zs_zpool_destroy(void *pool)
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
unsigned long *handle) unsigned long *handle)
{ {
*handle = zs_malloc(pool, size); *handle = zs_malloc(pool, size, gfp);
return *handle ? 0 : -1; return *handle ? 0 : -1;
} }
static void zs_zpool_free(void *pool, unsigned long handle) static void zs_zpool_free(void *pool, unsigned long handle)
...@@ -1391,7 +1395,7 @@ static unsigned long obj_malloc(struct size_class *class, ...@@ -1391,7 +1395,7 @@ static unsigned long obj_malloc(struct size_class *class,
* otherwise 0. * otherwise 0.
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/ */
unsigned long zs_malloc(struct zs_pool *pool, size_t size) unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{ {
unsigned long handle, obj; unsigned long handle, obj;
struct size_class *class; struct size_class *class;
...@@ -1400,7 +1404,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size) ...@@ -1400,7 +1404,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0; return 0;
handle = alloc_handle(pool); handle = alloc_handle(pool, gfp);
if (!handle) if (!handle)
return 0; return 0;
...@@ -1413,7 +1417,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size) ...@@ -1413,7 +1417,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
if (!first_page) { if (!first_page) {
spin_unlock(&class->lock); spin_unlock(&class->lock);
first_page = alloc_zspage(class, pool->flags); first_page = alloc_zspage(class, gfp);
if (unlikely(!first_page)) { if (unlikely(!first_page)) {
free_handle(pool, handle); free_handle(pool, handle);
return 0; return 0;
...@@ -1878,7 +1882,7 @@ static int zs_register_shrinker(struct zs_pool *pool) ...@@ -1878,7 +1882,7 @@ static int zs_register_shrinker(struct zs_pool *pool)
* On success, a pointer to the newly created pool is returned, * On success, a pointer to the newly created pool is returned,
* otherwise NULL. * otherwise NULL.
*/ */
struct zs_pool *zs_create_pool(const char *name, gfp_t flags) struct zs_pool *zs_create_pool(const char *name)
{ {
int i; int i;
struct zs_pool *pool; struct zs_pool *pool;
...@@ -1948,8 +1952,6 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags) ...@@ -1948,8 +1952,6 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
prev_class = class; prev_class = class;
} }
pool->flags = flags;
if (zs_pool_stat_create(pool, name)) if (zs_pool_stat_create(pool, name))
goto err; goto err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment