migrate: add hugepage migration code to migrate_pages()
Extend check_range() to handle vmas with VM_HUGETLB set. We will be able
to migrate hugepages with migrate_pages(2) after applying the enablement
patch which comes later in this series.

Note that for larger hugepages (covered by pud entries, 1GB on x86_64 for
example), we simply skip them for now.

Note that using pmd_huge/pud_huge assumes that hugepages are pointed to by
pmds/puds. This does not hold on some architectures that implement
hugepages with other mechanisms, such as ia64, but that is fine because
pmd_huge/pud_huge simply return 0 on such architectures, and the page
walker then ignores such hugepages.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e2d8cf4055 (parent b8ec1cee5a)
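Once the rest of the series lands, hugepage-backed regions become migratable
through the existing syscall interface. As a hedged illustration only, not
part of this patch: a minimal userspace sketch exercising migrate_pages(2)
via the libnuma wrapper on a MAP_HUGETLB mapping. It assumes a NUMA machine
with at least two nodes, hugepages preallocated through nr_hugepages, and
libnuma installed; the node numbers are placeholders.

/* Hypothetical demo, not from this patch: ask the kernel to move this
 * process's pages, including the 2MB hugetlb page mapped below (once
 * this series is applied), from node 0 to node 1.
 * Build with: gcc demo.c -lnuma */
#include <numaif.h>          /* migrate_pages(2) wrapper from libnuma */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;               /* one 2MB hugepage */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");  /* needs nr_hugepages > 0 */
		return 1;
	}
	memset(p, 0, len);                    /* fault the hugepage in */

	unsigned long old_nodes = 1UL << 0;   /* source: node 0 */
	unsigned long new_nodes = 1UL << 1;   /* destination: node 1 */
	/* maxnode = number of bits the node masks cover */
	if (migrate_pages(0 /* self */, 64, &old_nodes, &new_nodes) < 0)
		perror("migrate_pages");

	munmap(p, len);
	return 0;
}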
mm/mempolicy.c:

@@ -515,6 +515,30 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	return addr != end;
 }
 
+static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
+		const nodemask_t *nodes, unsigned long flags,
+		void *private)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	int nid;
+	struct page *page;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	page = pte_page(huge_ptep_get((pte_t *)pmd));
+	nid = page_to_nid(page);
+	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+		goto unlock;
+	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+	if (flags & (MPOL_MF_MOVE_ALL) ||
+	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
+		isolate_huge_page(page, private);
+unlock:
+	spin_unlock(&vma->vm_mm->page_table_lock);
+#else
+	BUG();
+#endif
+}
+
 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
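The skip test in check_hugetlb_pmd_range() is easy to misread, so here is a
minimal standalone sketch (userspace, with node_isset() folded into a bool
argument and the internal MPOL_MF_INVERT flag stubbed with a placeholder
value) showing what it selects: without MPOL_MF_INVERT the page is acted on
only when its node is in the mask; with it, only when its node is outside.

/* Standalone sketch of the skip predicate above; MPOL_MF_INVERT is an
 * internal mempolicy.c flag, stubbed here with a placeholder value. */
#include <stdbool.h>
#include <stdio.h>

#define MPOL_MF_INVERT 0x100U  /* placeholder, not the real value */

/* Mirrors: if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) goto unlock; */
static bool skipped(bool nid_in_mask, unsigned int flags)
{
	return nid_in_mask == !!(flags & MPOL_MF_INVERT);
}

int main(void)
{
	printf("in mask, no invert  -> skipped=%d\n", skipped(true, 0));                /* 0: migrate */
	printf("off mask, no invert -> skipped=%d\n", skipped(false, 0));               /* 1: skip */
	printf("in mask, invert     -> skipped=%d\n", skipped(true, MPOL_MF_INVERT));   /* 1: skip */
	printf("off mask, invert    -> skipped=%d\n", skipped(false, MPOL_MF_INVERT));  /* 0: migrate */
	return 0;
}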
@@ -526,6 +550,13 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (!pmd_present(*pmd))
+			continue;
+		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
+			check_hugetlb_pmd_range(vma, pmd, nodes,
+						flags, private);
+			continue;
+		}
 		split_huge_page_pmd(vma, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
@@ -547,6 +578,8 @@ static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
+		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
+			continue;
 		if (pud_none_or_clear_bad(pud))
 			continue;
 		if (check_pmd_range(vma, pud, addr, next, nodes,
@@ -638,9 +671,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 			return ERR_PTR(-EFAULT);
 		}
 
-		if (is_vm_hugetlb_page(vma))
-			goto next;
-
 		if (flags & MPOL_MF_LAZY) {
 			change_prot_numa(vma, start, endvma);
 			goto next;
@@ -993,6 +1023,10 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
+	if (PageHuge(page))
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					node);
+	else
 	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
 
@@ -1023,7 +1057,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 		err = migrate_pages(&pagelist, new_node_page, dest,
 					MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
-			putback_lru_pages(&pagelist);
+			putback_movable_pages(&pagelist);
 	}
 
 	return err;
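Read together, the last two hunks give the caller-side pattern: isolate pages
onto a private list, pass a node-aware allocation callback to migrate_pages(),
and put back whatever failed to move. Below is a condensed, kernel-style
sketch of that pattern as this 3.12-era series shapes it; it paraphrases the
code above, and the function name migrate_list_to_node is hypothetical, not
something this patch adds.

#include <linux/migrate.h>
#include <linux/mm.h>

/* Sketch: migrate an already-isolated list of pages (possibly including
 * hugetlb pages) to node 'dest', mirroring migrate_to_node() above. */
static int migrate_list_to_node(struct list_head *pagelist, int dest)
{
	int err = 0;

	if (!list_empty(pagelist)) {
		/* new_node_page() (see hunk above) picks a hugetlb-aware
		 * allocation for PageHuge() pages, a movable page otherwise. */
		err = migrate_pages(pagelist, new_node_page, dest,
				    MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			/* hugepage-aware putback, replacing putback_lru_pages() */
			putback_movable_pages(pagelist);
	}
	return err;
}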