Commit 33ddb687 authored by Jens Axboe, committed by Jens Axboe

[PATCH] limit size of bio_vec pools

We are currently wasting ~2MiB on the bio pools. This is fine on systems
with plenty of RAM, but it's too much for a 16MB system, for instance.

This patch scales the bio_vec mempool sizes with the amount of memory
available. The logic is mainly:

+       megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
+       if (megabytes <= 16)
+               scale = 0;
+       else if (megabytes <= 32)
+               scale = 1;
+       else if (megabytes <= 64)
+               scale = 2;
+       else if (megabytes <= 96)
+               scale = 3;
+       else if (megabytes <= 128)
+               scale = 4;

and then for mempool setup:

+               if (i >= scale)
+                       pool_entries >>= 1;
+
+               bp->pool = mempool_create(pool_entries, slab_pool_alloc,
                                        slab_pool_free, bp->slab);

So we allocate fewer and fewer entries for the bigger pools. It doesn't
make much sense to fill memory with sg tables for 256 page entries on a
16MB system.
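
For example, with a scale point of 1 and a starting count of 56 entries
(the mem=32m case shown further down), the six pools end up with 56, 28,
14, 7, 3 and 1 entries: from the scale point on, every pool gets half as
many entries as the previous one.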

In addition, we select a starting pool_entries value based on the amount
of RAM as well:

+       pool_entries = megabytes * 2;
+       if (pool_entries > 256)
+               pool_entries = 256;
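
As a rough illustration, here is a small userspace sketch of the same
sizing math (not the kernel code itself). The 56 bytes/bio and 12
bytes/bio_vec figures are taken from the boot output quoted below and
assume a 32-bit build, and the free-megabyte inputs are only stand-ins
for what nr_free_pages() happens to report at init time:

#include <stdio.h>

#define BIO_POOL_SIZE           256
#define BIOVEC_NR_POOLS         6

static const int nr_vecs[BIOVEC_NR_POOLS] = { 1, 4, 16, 64, 128, 256 };

static void show_pools(int megabytes)
{
        int scale = BIOVEC_NR_POOLS;
        int pool_entries, i;
        long total = BIO_POOL_SIZE * 56;        /* the bio pool itself, 14KiB */

        /* find out where to start scaling */
        if (megabytes <= 16)
                scale = 0;
        else if (megabytes <= 32)
                scale = 1;
        else if (megabytes <= 64)
                scale = 2;
        else if (megabytes <= 96)
                scale = 3;
        else if (megabytes <= 128)
                scale = 4;

        /* starting count: two entries per free megabyte, capped at 256 */
        pool_entries = megabytes * 2;
        if (pool_entries > 256)
                pool_entries = 256;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                /* from the scale point on, each pool gets half the entries */
                if (i >= scale)
                        pool_entries >>= 1;

                printf("pool[%d]: %3d bvecs: %3d entries (%d bytes)\n",
                       i, nr_vecs[i], pool_entries, nr_vecs[i] * 12);

                total += (long)pool_entries * nr_vecs[i] * 12;
        }
        printf("%d free megabytes -> ~%ldKiB total\n\n", megabytes, total >> 10);
}

int main(void)
{
        show_pools(122);        /* roughly what a 128MB box has free at init */
        show_pools(28);         /* roughly what booting with mem=32m leaves */
        return 0;
}

Running it reproduces the entry counts below and totals in line with the
~620KiB and ~31KiB figures (both including the 14KiB bio pool).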

The end result is that on a 128MB system, it looks like:

BIO: pool of 256 setup, 14Kb (56 bytes/bio)
biovec pool[0]:   1 bvecs: 244 entries (12 bytes)
biovec pool[1]:   4 bvecs: 244 entries (48 bytes)
biovec pool[2]:  16 bvecs: 244 entries (192 bytes)
biovec pool[3]:  64 bvecs: 244 entries (768 bytes)
biovec pool[4]: 128 bvecs: 122 entries (1536 bytes)
biovec pool[5]: 256 bvecs:  61 entries (3072 bytes)

i.e. a total of ~620KiB used. Booting with mem=32m gives us:

BIO: pool of 256 setup, 14Kb (56 bytes/bio)
biovec pool[0]:   1 bvecs:  56 entries (12 bytes)
biovec pool[1]:   4 bvecs:  28 entries (48 bytes)
biovec pool[2]:  16 bvecs:  14 entries (192 bytes)
biovec pool[3]:  64 bvecs:   7 entries (768 bytes)
biovec pool[4]: 128 bvecs:   3 entries (1536 bytes)
biovec pool[5]: 256 bvecs:   1 entries (3072 bytes)

i.e. a total of ~31KiB. Booting with 512MB makes it:

BIO: pool of 256 setup, 14Kb (56 bytes/bio)
biovec pool[0]:   1 bvecs: 256 entries (12 bytes)
biovec pool[1]:   4 bvecs: 256 entries (48 bytes)
biovec pool[2]:  16 bvecs: 256 entries (192 bytes)
biovec pool[3]:  64 bvecs: 256 entries (768 bytes)
biovec pool[4]: 128 bvecs: 256 entries (1536 bytes)
biovec pool[5]: 256 bvecs: 256 entries (3072 bytes)

which is the same as before. The cut-off point is somewhere a bit over
256MB. Andrew suggested we may want to 'cheat' a bit here and leave the
busy pools alone. We know that mpage is going to be heavy on the 16-entry
pool, so it might make sense to keep such a pool at full size and not
scale it. We can deal with that later, though.
parent e80bc959
@@ -33,7 +33,7 @@ static kmem_cache_t *bio_slab;
 #define BIOVEC_NR_POOLS 6
 
 struct biovec_pool {
-        int size;
+        int nr_vecs;
         char *name;
         kmem_cache_t *slab;
         mempool_t *pool;
@@ -88,7 +88,7 @@ static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, int *idx)
 
         bvl = mempool_alloc(bp->pool, gfp_mask);
         if (bvl)
-                memset(bvl, 0, bp->size);
+                memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
 
         return bvl;
 }
@@ -457,27 +457,55 @@ void bio_endio(struct bio *bio, int uptodate)
         bio->bi_end_io(bio);
 }
 
-static void __init biovec_init_pool(void)
+static void __init biovec_init_pools(void)
 {
-        int i, size;
+        int i, size, megabytes, pool_entries = BIO_POOL_SIZE;
+        int scale = BIOVEC_NR_POOLS;
+
+        megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
+
+        /*
+         * find out where to start scaling
+         */
+        if (megabytes <= 16)
+                scale = 0;
+        else if (megabytes <= 32)
+                scale = 1;
+        else if (megabytes <= 64)
+                scale = 2;
+        else if (megabytes <= 96)
+                scale = 3;
+        else if (megabytes <= 128)
+                scale = 4;
+
+        /*
+         * scale number of entries
+         */
+        pool_entries = megabytes * 2;
+        if (pool_entries > 256)
+                pool_entries = 256;
 
         for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                 struct biovec_pool *bp = bvec_array + i;
 
-                size = bp->size * sizeof(struct bio_vec);
-
-                printk("biovec: init pool %d, %d entries, %d bytes\n", i,
-                                                bp->size, size);
+                size = bp->nr_vecs * sizeof(struct bio_vec);
 
                 bp->slab = kmem_cache_create(bp->name, size, 0,
                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
                 if (!bp->slab)
                         panic("biovec: can't init slab cache\n");
-                bp->pool = mempool_create(BIO_POOL_SIZE, slab_pool_alloc,
+
+                if (i >= scale)
+                        pool_entries >>= 1;
+
+                bp->pool = mempool_create(pool_entries, slab_pool_alloc,
                                                 slab_pool_free, bp->slab);
                 if (!bp->pool)
                         panic("biovec: can't init mempool\n");
-                bp->size = size;
+
+                printk("biovec pool[%d]: %3d bvecs: %3d entries (%d bytes)\n",
+                                                i, bp->nr_vecs, pool_entries,
+                                                size);
         }
 }
@@ -493,7 +521,7 @@ static int __init init_bio(void)
 
         printk("BIO: pool of %d setup, %ZuKb (%Zd bytes/bio)\n", BIO_POOL_SIZE, BIO_POOL_SIZE * sizeof(struct bio) >> 10, sizeof(struct bio));
 
-        biovec_init_pool();
+        biovec_init_pools();
 
         return 0;
 }