mm: page migration use the put_new_page whenever necessary
I don't know of any problem from the way it's used in our current tree, but there is one defect in page migration's custom put_new_page feature.  An unused newpage is expected to be released with the put_new_page(), but there was one MIGRATEPAGE_SUCCESS (0) path which released it with putback_lru_page(): which can be very wrong for a custom pool.  Fixed more easily by resetting put_new_page once it won't be needed, than by adding a further flag to modify the rc test.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
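To make the pattern concrete, here is a minimal, self-contained C sketch of the idea the fix relies on: clear the caller-supplied put_new_page callback as soon as the new page has actually been consumed, so the shared cleanup path can never hand a custom-pool page to the generic release routine.  All names in the sketch (move_one_page, alloc_new_page, do_move, generic_putback) are hypothetical stand-ins, not the kernel's functions; the real change is in the mm/migrate.c diff below.

/*
 * Illustrative sketch only -- a simplified stand-in for unmap_and_move().
 * alloc_new_page(), do_move() and generic_putback() are hypothetical
 * placeholders for get_new_page(), __unmap_and_move() and
 * putback_lru_page() respectively.
 */
#include <errno.h>
#include <stddef.h>

#define MIGRATEPAGE_SUCCESS 0

struct page;                                    /* opaque in this sketch */
typedef void free_page_t(struct page *newpage, unsigned long private);

struct page *alloc_new_page(unsigned long private);
int do_move(struct page *old, struct page *newpage);
void generic_putback(struct page *newpage);

int move_one_page(struct page *old, free_page_t *put_new_page,
                  unsigned long private)
{
        struct page *newpage = alloc_new_page(private);
        int rc;

        if (!newpage)
                return -ENOMEM;

        rc = do_move(old, newpage);

        /*
         * The key idea of the fix: once newpage is in use, forget the
         * custom release callback so no cleanup path can free it.
         */
        if (rc == MIGRATEPAGE_SUCCESS)
                put_new_page = NULL;

        /*
         * Cleanup of an *unused* newpage: return it to the caller's
         * custom pool if one was supplied, otherwise to the generic
         * path.  Before the fix, one success path could reach
         * generic_putback() even though newpage came from a custom pool.
         */
        if (put_new_page)
                put_new_page(newpage, private);
        else if (rc != MIGRATEPAGE_SUCCESS)
                generic_putback(newpage);

        return rc;
}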
parent 14e0f9bcc9
commit 2def7424c9
mm/migrate.c | 19
@@ -938,10 +938,11 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
                                   int force, enum migrate_mode mode,
                                   enum migrate_reason reason)
 {
-       int rc = 0;
+       int rc = MIGRATEPAGE_SUCCESS;
        int *result = NULL;
-       struct page *newpage = get_new_page(page, private, &result);
+       struct page *newpage;
 
+       newpage = get_new_page(page, private, &result);
        if (!newpage)
                return -ENOMEM;
 
@@ -955,6 +956,8 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
                goto out;
 
        rc = __unmap_and_move(page, newpage, force, mode);
+       if (rc == MIGRATEPAGE_SUCCESS)
+               put_new_page = NULL;
 
 out:
        if (rc != -EAGAIN) {
@@ -981,7 +984,7 @@ out:
         * it. Otherwise, putback_lru_page() will drop the reference grabbed
         * during isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+       if (put_new_page) {
                ClearPageSwapBacked(newpage);
                put_new_page(newpage, private);
        } else if (unlikely(__is_movable_balloon_page(newpage))) {
@@ -1022,7 +1025,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                                struct page *hpage, int force,
                                enum migrate_mode mode)
 {
-       int rc = 0;
+       int rc = -EAGAIN;
        int *result = NULL;
        int page_was_mapped = 0;
        struct page *new_hpage;
@@ -1044,8 +1047,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        if (!new_hpage)
                return -ENOMEM;
 
-       rc = -EAGAIN;
-
        if (!trylock_page(hpage)) {
                if (!force || mode != MIGRATE_SYNC)
                        goto out;
@@ -1070,8 +1071,10 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        if (anon_vma)
                put_anon_vma(anon_vma);
 
-       if (rc == MIGRATEPAGE_SUCCESS)
+       if (rc == MIGRATEPAGE_SUCCESS) {
                hugetlb_cgroup_migrate(hpage, new_hpage);
+               put_new_page = NULL;
+       }
 
        unlock_page(hpage);
 out:
@@ -1083,7 +1086,7 @@ out:
         * it. Otherwise, put_page() will drop the reference grabbed during
         * isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+       if (put_new_page)
                put_new_page(new_hpage, private);
        else
                putback_active_hugepage(new_hpage);