Commit d6db47e5 authored by Chunhai Guo, committed by Gao Xiang

erofs: do not use pagepool in z_erofs_gbuf_growsize()

Let's use alloc_pages_bulk_array() for simplicity and get rid of the
unnecessary pagepool.
Signed-off-by: Chunhai Guo <guochunhai@vivo.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240402092757.2635257-1-guochunhai@vivo.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
parent f36f3010
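
For context, a minimal sketch of the bulk-allocation retry pattern this change relies on: alloc_pages_bulk_array() fills only the NULL slots of the page array and returns the total number of populated entries, so the caller keeps retrying until the array is full or a pass makes no progress. The helper name fill_page_array() below is hypothetical and merely mirrors the do/while loop in the diff; it is not part of the patch.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper mirroring the retry loop in the diff below.
 * @pages has @filled leading non-NULL entries (reused pages); the
 * remaining slots are NULL and get populated by the bulk allocator.
 */
static int fill_page_array(struct page **pages, unsigned long nrpages,
			   unsigned long filled)
{
	unsigned long last;

	do {
		last = filled;
		/* fills NULL slots only; returns total populated entries */
		filled = alloc_pages_bulk_array(GFP_KERNEL, nrpages, pages);
		if (filled == last)
			return -ENOMEM;	/* no forward progress */
	} while (filled != nrpages);
	return 0;
}

If a pass of alloc_pages_bulk_array() adds nothing, the code gives up and reports -ENOMEM instead of looping forever, which is exactly what the `last == j` check in the patch does.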
@@ -60,63 +60,58 @@ void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
 int z_erofs_gbuf_growsize(unsigned int nrpages)
 {
 	static DEFINE_MUTEX(gbuf_resize_mutex);
-	struct page *pagepool = NULL;
-	int delta, ret, i, j;
+	struct page **tmp_pages = NULL;
+	struct z_erofs_gbuf *gbuf;
+	void *ptr, *old_ptr;
+	int last, i, j;
 
 	mutex_lock(&gbuf_resize_mutex);
-	delta = nrpages - z_erofs_gbuf_nrpages;
-	ret = 0;
 	/* avoid shrinking gbufs, since no idea how many fses rely on */
-	if (delta <= 0)
-		goto out;
+	if (nrpages <= z_erofs_gbuf_nrpages) {
+		mutex_unlock(&gbuf_resize_mutex);
+		return 0;
+	}
 
 	for (i = 0; i < z_erofs_gbuf_count; ++i) {
-		struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];
-		struct page **pages, **tmp_pages;
-		void *ptr, *old_ptr = NULL;
-
-		ret = -ENOMEM;
+		gbuf = &z_erofs_gbufpool[i];
 		tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
 		if (!tmp_pages)
-			break;
-		for (j = 0; j < nrpages; ++j) {
-			tmp_pages[j] = erofs_allocpage(&pagepool, GFP_KERNEL);
-			if (!tmp_pages[j])
-				goto free_pagearray;
-		}
+			goto out;
+
+		for (j = 0; j < gbuf->nrpages; ++j)
+			tmp_pages[j] = gbuf->pages[j];
+		do {
+			last = j;
+			j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
+						   tmp_pages);
+			if (last == j)
+				goto out;
+		} while (j != nrpages);
+
 		ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
 		if (!ptr)
-			goto free_pagearray;
+			goto out;
 
-		pages = tmp_pages;
 		spin_lock(&gbuf->lock);
+		kfree(gbuf->pages);
+		gbuf->pages = tmp_pages;
 		old_ptr = gbuf->ptr;
 		gbuf->ptr = ptr;
-		tmp_pages = gbuf->pages;
-		gbuf->pages = pages;
-		j = gbuf->nrpages;
 		gbuf->nrpages = nrpages;
 		spin_unlock(&gbuf->lock);
-		ret = 0;
-		if (!tmp_pages) {
-			DBG_BUGON(old_ptr);
-			continue;
-		}
-
 		if (old_ptr)
 			vunmap(old_ptr);
-
-free_pagearray:
-		while (j)
-			erofs_pagepool_add(&pagepool, tmp_pages[--j]);
-		kfree(tmp_pages);
-		if (ret)
-			break;
 	}
 	z_erofs_gbuf_nrpages = nrpages;
-	erofs_release_pages(&pagepool);
 out:
+	if (i < z_erofs_gbuf_count && tmp_pages) {
+		for (j = 0; j < nrpages; ++j)
+			if (tmp_pages[j] && tmp_pages[j] != gbuf->pages[j])
+				__free_page(tmp_pages[j]);
+		kfree(tmp_pages);
+	}
 	mutex_unlock(&gbuf_resize_mutex);
-	return ret;
+	return i < z_erofs_gbuf_count ? -ENOMEM : 0;
 }
 
 int __init z_erofs_gbuf_init(void)