mm: add cma pcp list

Add a CMA pcp list in order to increase CMA memory utilization.

Increased CMA memory utilization will improve overall memory
utilization, because free CMA pages are ignored when memory reclaim
is done with a gfp mask of GFP_KERNEL: such allocations cannot be
satisfied from CMA, so free CMA pages are discounted from a zone's
free page count and reclaim is triggered sooner.

Since most memory reclaim is done by kswapd, which uses a gfp mask
of GFP_KERNEL, increasing CMA memory utilization therefore leads to
less aggressive memory reclaim.

Increased CMA memory utilization will also improve performance, for
example by increasing app concurrency.
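
To make the reclaim argument concrete: the zone watermark check
discounts free CMA pages for any allocation that cannot use them, so
unused CMA memory makes a zone look closer to its watermark and reclaim
starts sooner. The sketch below is only an illustration of that check,
modelled on __zone_watermark_ok() in kernels of this era; it is not part
of this patch:

	/*
	 * A GFP_KERNEL allocation cannot be placed in a CMA pageblock,
	 * so ALLOC_CMA is not set and every free-but-unused CMA page is
	 * subtracted before the zone is compared against its watermark.
	 */
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return false;	/* below the watermark: reclaim is needed */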

Change-Id: I809589a25c6abca51f1c963f118adfc78e955cf9
Signed-off-by: Liam Mark <lmark@codeaurora.org>
3 changed files with 65 additions and 47 deletions

include/linux/mmzone.h

@@ -39,8 +39,6 @@ enum {
 	MIGRATE_UNMOVABLE,
 	MIGRATE_RECLAIMABLE,
 	MIGRATE_MOVABLE,
-	MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
-	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
 #ifdef CONFIG_CMA
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -57,8 +55,10 @@ enum {
 	 */
 	MIGRATE_CMA,
 #endif
+	MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
 #ifdef CONFIG_MEMORY_ISOLATION
 	MIGRATE_ISOLATE, /* can't allocate from here */
 #endif
 	MIGRATE_TYPES
 };
@@ -74,9 +74,11 @@ extern int *get_migratetype_fallbacks(int mtype);
 #ifdef CONFIG_CMA
 bool is_cma_pageblock(struct page *page);
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define get_cma_migrate_type() MIGRATE_CMA
 #else
 # define is_cma_pageblock(page) false
 # define is_migrate_cma(migratetype) false
+# define get_cma_migrate_type() MIGRATE_MOVABLE
 #endif
 
 #define for_each_migratetype_order(order, type) \
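
The effect of the reordering above is that MIGRATE_CMA now sits below
MIGRATE_PCPTYPES, so CMA becomes one of the per-cpu page list types and
a CMA migratetype can index pcp->lists directly. For reference, a sketch
of the structure being indexed (field layout as in mainline kernels of
this era, shown for illustration only):

	struct per_cpu_pages {
		int count;	/* number of pages in the lists */
		int high;	/* high watermark, emptying needed */
		int batch;	/* chunk size for buddy add/remove */

		/* Lists of pages, one per migrate type stored on the
		 * pcp-lists; with this patch that includes MIGRATE_CMA. */
		struct list_head lists[MIGRATE_PCPTYPES];
	};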

mm/page_alloc.c

@@ -1164,34 +1164,12 @@ retry_reserve:
 	return page;
 }
 
-static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
-						int migratetype)
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
 {
 	struct page *page = 0;
-
-#ifdef CONFIG_CMA
-	if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
-		page = __rmqueue_smallest(zone, order, MIGRATE_CMA);
-	if (!page)
-#endif
-retry_reserve :
-		page = __rmqueue_smallest(zone, order, migratetype);
-
-	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
-		page = __rmqueue_fallback(zone, order, migratetype);
-
-		/*
-		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
-		 * is used because __rmqueue_smallest is an inline function
-		 * and we want just one call site
-		 */
-		if (!page) {
-			migratetype = MIGRATE_RESERVE;
-			goto retry_reserve;
-		}
-	}
-
-	trace_mm_page_alloc_zone_locked(page, order, migratetype);
-
+
+	if (IS_ENABLED(CONFIG_CMA))
+		if (!zone->cma_alloc)
+			page = __rmqueue_smallest(zone, order, MIGRATE_CMA);
 	return page;
 }
@@ -1202,15 +1180,21 @@ retry_reserve :
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype, int cold, int cma)
+			int migratetype, int cold)
 {
 	int mt = migratetype, i;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page;
-		if (cma)
-			page = __rmqueue_cma(zone, order, migratetype);
+
+		/*
+		 * If migrate type CMA is being requested, only try to
+		 * satisfy the request with CMA pages, to try and increase
+		 * CMA utilization.
+		 */
+		if (is_migrate_cma(migratetype))
+			page = __rmqueue_cma(zone, order);
 		else
 			page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
@@ -1245,6 +1229,27 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	return i;
 }
 
+/*
+ * Return the pcp list that corresponds to the migrate type if that list isn't
+ * empty.
+ * If the list is empty return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+			unsigned int order, struct per_cpu_pages *pcp,
+			int migratetype, int cold)
+{
+	struct list_head *list = &pcp->lists[migratetype];
+
+	if (list_empty(list)) {
+		pcp->count += rmqueue_bulk(zone, order,
+				pcp->batch, list,
+				migratetype, cold);
+
+		if (list_empty(list))
+			list = NULL;
+	}
+	return list;
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Called from the vmstat counter updater to drain pagesets of this
@@ -1415,8 +1420,7 @@ void free_hot_cold_page(struct page *page, int cold)
 	 * excessively into the page allocator
 	 */
 	if (migratetype >= MIGRATE_PCPTYPES) {
-		if (unlikely(is_migrate_isolate(migratetype)) ||
-				is_migrate_cma(migratetype)) {
+		if (unlikely(is_migrate_isolate(migratetype))) {
 			free_one_page(zone, page, 0, migratetype);
 			goto out;
 		}
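
Because MIGRATE_CMA is now below MIGRATE_PCPTYPES, the test above no
longer catches CMA pages, and free_hot_cold_page() files them on the new
CMA pcp list instead of handing them straight back to the buddy
allocator. An illustrative sketch of the resulting order-0 free path
(heavily simplified, not the full function):

	migratetype = get_pageblock_migratetype(page);

	/* Only isolated pages still bypass the per-cpu lists. */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	/* CMA pages (MIGRATE_CMA < MIGRATE_PCPTYPES) now land here. */
	list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;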
@@ -1558,23 +1562,33 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 			int migratetype)
 {
 	unsigned long flags;
-	struct page *page;
+	struct page *page = NULL;
 	int cold = !!(gfp_flags & __GFP_COLD);
 
 again:
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
-		struct list_head *list;
+		struct list_head *list = NULL;
 
 		local_irq_save(flags);
 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
-		list = &pcp->lists[migratetype];
-		if (list_empty(list)) {
-			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, list,
-					migratetype, cold,
-					gfp_flags & __GFP_CMA);
-			if (unlikely(list_empty(list)))
+
+		/* First try to get CMA pages */
+		if (migratetype == MIGRATE_MOVABLE &&
+			gfp_flags & __GFP_CMA) {
+			list = get_populated_pcp_list(zone, 0, pcp,
+					get_cma_migrate_type(), cold);
+		}
+
+		if (list == NULL) {
+			/*
+			 * Either CMA is not suitable or there are no free CMA
+			 * pages.
+			 */
+			list = get_populated_pcp_list(zone, 0, pcp,
+					migratetype, cold);
+			if (unlikely(list == NULL) ||
+					unlikely(list_empty(list)))
 				goto failed;
 		}
@@ -1600,10 +1614,12 @@ again:
 		WARN_ON_ONCE(order > 1);
 	}
 	spin_lock_irqsave(&zone->lock, flags);
-	if (gfp_flags & __GFP_CMA)
-		page = __rmqueue_cma(zone, order, migratetype);
-	else
+
+	if (migratetype == MIGRATE_MOVABLE && gfp_flags & __GFP_CMA)
+		page = __rmqueue_cma(zone, order);
+
+	if (!page)
 		page = __rmqueue(zone, order, migratetype);
 	spin_unlock(&zone->lock);
 	if (!page)
 		goto failed;
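
Taken together, an order-0 movable allocation that passes __GFP_CMA is
now served from the CMA pcp list first and falls back to the regular
movable path only when no CMA page is available. A hypothetical caller,
using the __GFP_CMA flag this tree already provides:

	/* Prefer a CMA page for a movable, userspace-style allocation. */
	struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_CMA);

	if (!page)
		return -ENOMEM;

	/* ... use the page ... */

	/* Order-0 CMA pages are now freed back to the CMA pcp list. */
	__free_page(page);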

mm/vmstat.c

@@ -629,10 +629,10 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
 	"Unmovable",
 	"Reclaimable",
 	"Movable",
-	"Reserve",
 #ifdef CONFIG_CMA
 	"CMA",
 #endif
+	"Reserve",
 #ifdef CONFIG_MEMORY_ISOLATION
 	"Isolate",
 #endif