mm: page migration remove_migration_ptes at lock+unlock level

Clean up page migration a little more by calling remove_migration_ptes()
from the same level, on success or on failure, from __unmap_and_move() or
from unmap_and_move_huge_page().

Don't reset page->mapping of a PageAnon old page in move_to_new_page(),
leave that to when the page is freed.  Except for here in page migration,
it has been an invariant that a PageAnon (bit set in page->mapping) page
stays PageAnon until it is freed, and I think we're safer to keep to that.

And with the above rearrangement, it's necessary because zap_pte_range()
wants to identify whether a migration entry represents a file or an anon
page, to update the appropriate rss stats without waiting on it.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Hugh Dickins 2015-11-05 18:49:53 -08:00 committed by Linus Torvalds
parent 7db7671f83
commit 5c3f9a6737

View File

@@ -722,7 +722,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *	MIGRATEPAGE_SUCCESS - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
-				int page_was_mapped, enum migrate_mode mode)
+				enum migrate_mode mode)
 {
 	struct address_space *mapping;
 	int rc;
@@ -755,19 +755,21 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		 * space which also has its own migratepage callback. This
 		 * is the most common path for page migration.
 		 */
-		rc = mapping->a_ops->migratepage(mapping,
-						newpage, page, mode);
+		rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
 	else
 		rc = fallback_migrate_page(mapping, newpage, page, mode);
 
-	if (rc != MIGRATEPAGE_SUCCESS) {
+	/*
+	 * When successful, old pagecache page->mapping must be cleared before
+	 * page is freed; but stats require that PageAnon be left as PageAnon.
+	 */
+	if (rc == MIGRATEPAGE_SUCCESS) {
+		set_page_memcg(page, NULL);
+		if (!PageAnon(page))
+			page->mapping = NULL;
+	} else {
 		set_page_memcg(newpage, NULL);
 		newpage->mapping = NULL;
-	} else {
-		set_page_memcg(page, NULL);
-		if (page_was_mapped)
-			remove_migration_ptes(page, newpage);
-		page->mapping = NULL;
 	}
 	return rc;
 }
@@ -902,10 +904,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}
 
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page, page_was_mapped, mode);
+		rc = move_to_new_page(newpage, page, mode);
 
-	if (rc && page_was_mapped)
-		remove_migration_ptes(page, page);
+	if (page_was_mapped)
+		remove_migration_ptes(page,
+			rc == MIGRATEPAGE_SUCCESS ? newpage : page);
 
 out_unlock_both:
 	unlock_page(newpage);
@@ -1066,10 +1069,11 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	}
 
 	if (!page_mapped(hpage))
-		rc = move_to_new_page(new_hpage, hpage, page_was_mapped, mode);
+		rc = move_to_new_page(new_hpage, hpage, mode);
 
-	if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
-		remove_migration_ptes(hpage, hpage);
+	if (page_was_mapped)
+		remove_migration_ptes(hpage,
+			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage);
 
 	unlock_page(new_hpage);