mirror of
https://github.com/FEX-Emu/linux.git
synced 2024-12-23 09:56:00 +00:00
mm, compaction: do not count compact_stall if all zones skipped compaction
The compact_stall vmstat counter counts the number of allocations stalled by direct compaction. It does not count when all attempted zones had deferred compaction, but it does count when all zones skipped compaction. The skipping is decided based on a very early check of compaction_suitable(), based on watermarks and memory fragmentation. Therefore it makes sense not to count skipped compactions as stalls. Moreover, compact_success or compact_fail is also already not being counted when compaction was skipped, so this patch changes the compact_stall counting to match the other two. Additionally, restructure __alloc_pages_direct_compact() code for better readability. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Minchan Kim <minchan@kernel.org> Acked-by: Mel Gorman <mgorman@suse.de> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Michal Nazarewicz <mina86@mina86.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Christoph Lameter <cl@linux.com> Cc: Rik van Riel <riel@redhat.com> Acked-by: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
53853e2d2b
commit
98dd3b48a7
@@ -2301,7 +2301,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|||||||
{
|
{
|
||||||
struct zone *last_compact_zone = NULL;
|
struct zone *last_compact_zone = NULL;
|
||||||
unsigned long compact_result;
|
unsigned long compact_result;
|
||||||
|
struct page *page;
|
||||||
|
|
||||||
if (!order)
|
if (!order)
|
||||||
return NULL;
|
return NULL;
|
||||||
@@ -2313,50 +2313,56 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|||||||
&last_compact_zone);
|
&last_compact_zone);
|
||||||
current->flags &= ~PF_MEMALLOC;
|
current->flags &= ~PF_MEMALLOC;
|
||||||
|
|
||||||
if (compact_result > COMPACT_DEFERRED)
|
switch (compact_result) {
|
||||||
count_vm_event(COMPACTSTALL);
|
case COMPACT_DEFERRED:
|
||||||
else
|
|
||||||
*deferred_compaction = true;
|
*deferred_compaction = true;
|
||||||
|
/* fall-through */
|
||||||
if (compact_result > COMPACT_SKIPPED) {
|
case COMPACT_SKIPPED:
|
||||||
struct page *page;
|
return NULL;
|
||||||
|
default:
|
||||||
/* Page migration frees to the PCP lists but we want merging */
|
break;
|
||||||
drain_pages(get_cpu());
|
|
||||||
put_cpu();
|
|
||||||
|
|
||||||
page = get_page_from_freelist(gfp_mask, nodemask,
|
|
||||||
order, zonelist, high_zoneidx,
|
|
||||||
alloc_flags & ~ALLOC_NO_WATERMARKS,
|
|
||||||
preferred_zone, classzone_idx, migratetype);
|
|
||||||
|
|
||||||
if (page) {
|
|
||||||
struct zone *zone = page_zone(page);
|
|
||||||
|
|
||||||
zone->compact_blockskip_flush = false;
|
|
||||||
compaction_defer_reset(zone, order, true);
|
|
||||||
count_vm_event(COMPACTSUCCESS);
|
|
||||||
return page;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* last_compact_zone is where try_to_compact_pages thought
|
|
||||||
* allocation should succeed, so it did not defer compaction.
|
|
||||||
* But now we know that it didn't succeed, so we do the defer.
|
|
||||||
*/
|
|
||||||
if (last_compact_zone && mode != MIGRATE_ASYNC)
|
|
||||||
defer_compaction(last_compact_zone, order);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* It's bad if compaction run occurs and fails.
|
|
||||||
* The most likely reason is that pages exist,
|
|
||||||
* but not enough to satisfy watermarks.
|
|
||||||
*/
|
|
||||||
count_vm_event(COMPACTFAIL);
|
|
||||||
|
|
||||||
cond_resched();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* At least in one zone compaction wasn't deferred or skipped, so let's
|
||||||
|
* count a compaction stall
|
||||||
|
*/
|
||||||
|
count_vm_event(COMPACTSTALL);
|
||||||
|
|
||||||
|
/* Page migration frees to the PCP lists but we want merging */
|
||||||
|
drain_pages(get_cpu());
|
||||||
|
put_cpu();
|
||||||
|
|
||||||
|
page = get_page_from_freelist(gfp_mask, nodemask,
|
||||||
|
order, zonelist, high_zoneidx,
|
||||||
|
alloc_flags & ~ALLOC_NO_WATERMARKS,
|
||||||
|
preferred_zone, classzone_idx, migratetype);
|
||||||
|
|
||||||
|
if (page) {
|
||||||
|
struct zone *zone = page_zone(page);
|
||||||
|
|
||||||
|
zone->compact_blockskip_flush = false;
|
||||||
|
compaction_defer_reset(zone, order, true);
|
||||||
|
count_vm_event(COMPACTSUCCESS);
|
||||||
|
return page;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* last_compact_zone is where try_to_compact_pages thought allocation
|
||||||
|
* should succeed, so it did not defer compaction. But here we know
|
||||||
|
* that it didn't succeed, so we do the defer.
|
||||||
|
*/
|
||||||
|
if (last_compact_zone && mode != MIGRATE_ASYNC)
|
||||||
|
defer_compaction(last_compact_zone, order);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* It's bad if compaction run occurs and fails. The most likely reason
|
||||||
|
* is that pages exist, but not enough to satisfy watermarks.
|
||||||
|
*/
|
||||||
|
count_vm_event(COMPACTFAIL);
|
||||||
|
|
||||||
|
cond_resched();
|
||||||
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
|
Loading…
Reference in New Issue
Block a user