mirror of
https://github.com/joel16/android_kernel_sony_msm8994.git
synced 2024-11-24 20:50:30 +00:00
[BLOCK] Don't pin lots of memory in mempools
Currently we scale the mempool sizes depending on memory installed in the machine, except for the bio pool itself which sits at a fixed 256 entry pre-allocation. There's really no point in "optimizing" this OOM path, we just need enough preallocated to make progress. A single unit is enough; let's scale it down to 2 just to be on the safe side. This patch saves ~150kb of pinned kernel memory on a 32-bit box. Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
parent
b9099ff63c
commit
5972511b77
@ -867,7 +867,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
goto bad4;
|
||||
}
|
||||
|
||||
cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
|
||||
cc->bs = bioset_create(MIN_IOS, MIN_IOS);
|
||||
if (!cc->bs) {
|
||||
ti->error = "Cannot allocate crypt bioset";
|
||||
goto bad_bs;
|
||||
|
@ -60,7 +60,7 @@ static int resize_pool(unsigned int new_ios)
|
||||
if (!_io_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
_bios = bioset_create(16, 16, 4);
|
||||
_bios = bioset_create(16, 16);
|
||||
if (!_bios) {
|
||||
mempool_destroy(_io_pool);
|
||||
_io_pool = NULL;
|
||||
|
@ -1012,7 +1012,7 @@ static struct mapped_device *alloc_dev(int minor)
|
||||
if (!md->tio_pool)
|
||||
goto bad3;
|
||||
|
||||
md->bs = bioset_create(16, 16, 4);
|
||||
md->bs = bioset_create(16, 16);
|
||||
if (!md->bs)
|
||||
goto bad_no_bioset;
|
||||
|
||||
|
@ -31,7 +31,7 @@
|
||||
|
||||
|
||||
#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
|
||||
#define SG_MEMPOOL_SIZE 32
|
||||
#define SG_MEMPOOL_SIZE 2
|
||||
|
||||
struct scsi_host_sg_pool {
|
||||
size_t size;
|
||||
|
41
fs/bio.c
41
fs/bio.c
@ -28,7 +28,7 @@
|
||||
#include <linux/blktrace_api.h>
|
||||
#include <scsi/sg.h> /* for struct sg_iovec */
|
||||
|
||||
#define BIO_POOL_SIZE 256
|
||||
#define BIO_POOL_SIZE 2
|
||||
|
||||
static struct kmem_cache *bio_slab __read_mostly;
|
||||
|
||||
@ -38,7 +38,7 @@ static struct kmem_cache *bio_slab __read_mostly;
|
||||
* a small number of entries is fine, not going to be performance critical.
|
||||
* basically we just need to survive
|
||||
*/
|
||||
#define BIO_SPLIT_ENTRIES 8
|
||||
#define BIO_SPLIT_ENTRIES 2
|
||||
mempool_t *bio_split_pool __read_mostly;
|
||||
|
||||
struct biovec_slab {
|
||||
@ -1120,7 +1120,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
|
||||
* create memory pools for biovec's in a bio_set.
|
||||
* use the global biovec slabs created for general use.
|
||||
*/
|
||||
static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
|
||||
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -1128,9 +1128,6 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
|
||||
struct biovec_slab *bp = bvec_slabs + i;
|
||||
mempool_t **bvp = bs->bvec_pools + i;
|
||||
|
||||
if (pool_entries > 1 && i >= scale)
|
||||
pool_entries >>= 1;
|
||||
|
||||
*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
|
||||
if (!*bvp)
|
||||
return -ENOMEM;
|
||||
@ -1161,7 +1158,7 @@ void bioset_free(struct bio_set *bs)
|
||||
kfree(bs);
|
||||
}
|
||||
|
||||
struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
|
||||
struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
|
||||
{
|
||||
struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
|
||||
|
||||
@ -1172,7 +1169,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
|
||||
if (!bs->bio_pool)
|
||||
goto bad;
|
||||
|
||||
if (!biovec_create_pools(bs, bvec_pool_size, scale))
|
||||
if (!biovec_create_pools(bs, bvec_pool_size))
|
||||
return bs;
|
||||
|
||||
bad:
|
||||
@ -1196,38 +1193,12 @@ static void __init biovec_init_slabs(void)
|
||||
|
||||
static int __init init_bio(void)
|
||||
{
|
||||
int megabytes, bvec_pool_entries;
|
||||
int scale = BIOVEC_NR_POOLS;
|
||||
|
||||
bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
|
||||
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
|
||||
|
||||
biovec_init_slabs();
|
||||
|
||||
megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
|
||||
|
||||
/*
|
||||
* find out where to start scaling
|
||||
*/
|
||||
if (megabytes <= 16)
|
||||
scale = 0;
|
||||
else if (megabytes <= 32)
|
||||
scale = 1;
|
||||
else if (megabytes <= 64)
|
||||
scale = 2;
|
||||
else if (megabytes <= 96)
|
||||
scale = 3;
|
||||
else if (megabytes <= 128)
|
||||
scale = 4;
|
||||
|
||||
/*
|
||||
* Limit number of entries reserved -- mempools are only used when
|
||||
* the system is completely unable to allocate memory, so we only
|
||||
* need enough to make progress.
|
||||
*/
|
||||
bvec_pool_entries = 1 + scale;
|
||||
|
||||
fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
|
||||
fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
|
||||
if (!fs_bio_set)
|
||||
panic("bio: can't allocate bios\n");
|
||||
|
||||
|
@ -276,7 +276,7 @@ extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
|
||||
extern mempool_t *bio_split_pool;
|
||||
extern void bio_pair_release(struct bio_pair *dbio);
|
||||
|
||||
extern struct bio_set *bioset_create(int, int, int);
|
||||
extern struct bio_set *bioset_create(int, int);
|
||||
extern void bioset_free(struct bio_set *);
|
||||
|
||||
extern struct bio *bio_alloc(gfp_t, int);
|
||||
|
Loading…
Reference in New Issue
Block a user