thp: do_huge_pmd_wp_page(): handle huge zero page

On write access to the huge zero page we allocate a new huge page and clear it.

If the allocation fails (ENOMEM), we fall back gracefully: create a new pmd
table and set the pte at the fault address to a newly allocated normal (4k)
page; all other ptes in the pmd are set to the normal zero page.
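For illustration only (not part of the patch), a minimal userspace sketch that
can exercise this path on a THP-enabled kernel: the first read of untouched
anonymous memory may be served by the huge zero page, and the following store
then takes the write-protect fault handled here. The 2M size and the
MADV_HUGEPAGE hint are assumptions (x86-64 HPAGE_PMD_SIZE; the mapping is not
guaranteed to be pmd-aligned or huge-page backed).

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t sz = 2UL << 20;	/* assumed pmd size (2M on x86-64) */
	char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	madvise(p, sz, MADV_HUGEPAGE);	/* hint THP backing, if enabled */
	volatile char c = p[0];		/* read fault: may map huge zero page */
	(void)c;
	p[0] = 1;			/* write fault: do_huge_pmd_wp_page() */
	munmap(p, sz);
	return 0;
}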

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 93b4796ded
parent fc9fe822f7
Kirill A. Shutemov, 2012-12-12 13:50:54 -08:00; committed by Linus Torvalds
3 changed files with 104 additions and 22 deletions

diff --git a/include/linux/mm.h b/include/linux/mm.h

@@ -516,6 +516,14 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 #endif
 
+#ifndef my_zero_pfn
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+	extern unsigned long zero_pfn;
+	return zero_pfn;
+}
+#endif
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of

diff --git a/mm/huge_memory.c b/mm/huge_memory.c

@@ -858,6 +858,70 @@ unlock:
 	spin_unlock(&mm->page_table_lock);
 }
 
+static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
+		struct vm_area_struct *vma, unsigned long address,
+		pmd_t *pmd, unsigned long haddr)
+{
+	pgtable_t pgtable;
+	pmd_t _pmd;
+	struct page *page;
+	int i, ret = 0;
+	unsigned long mmun_start;	/* For mmu_notifiers */
+	unsigned long mmun_end;		/* For mmu_notifiers */
+
+	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+	if (!page) {
+		ret |= VM_FAULT_OOM;
+		goto out;
+	}
+
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+		put_page(page);
+		ret |= VM_FAULT_OOM;
+		goto out;
+	}
+
+	clear_user_highpage(page, address);
+	__SetPageUptodate(page);
+
+	mmun_start = haddr;
+	mmun_end = haddr + HPAGE_PMD_SIZE;
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
+	spin_lock(&mm->page_table_lock);
+	pmdp_clear_flush(vma, haddr, pmd);
+	/* leave pmd empty until pte is filled */
+
+	pgtable = pgtable_trans_huge_withdraw(mm);
+	pmd_populate(mm, &_pmd, pgtable);
+
+	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+		pte_t *pte, entry;
+		if (haddr == (address & PAGE_MASK)) {
+			entry = mk_pte(page, vma->vm_page_prot);
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+			page_add_new_anon_rmap(page, vma, haddr);
+		} else {
+			entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+			entry = pte_mkspecial(entry);
+		}
+		pte = pte_offset_map(&_pmd, haddr);
+		VM_BUG_ON(!pte_none(*pte));
+		set_pte_at(mm, haddr, pte, entry);
+		pte_unmap(pte);
+	}
+	smp_wmb(); /* make pte visible before pmd */
+	pmd_populate(mm, pmd, pgtable);
+	spin_unlock(&mm->page_table_lock);
+	inc_mm_counter(mm, MM_ANONPAGES);
+
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
+	ret |= VM_FAULT_WRITE;
+out:
+	return ret;
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 		struct vm_area_struct *vma,
 		unsigned long address,
@@ -964,19 +1028,21 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
 {
 	int ret = 0;
-	struct page *page, *new_page;
+	struct page *page = NULL, *new_page;
 	unsigned long haddr;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
 
 	VM_BUG_ON(!vma->anon_vma);
+	haddr = address & HPAGE_PMD_MASK;
+	if (is_huge_zero_pmd(orig_pmd))
+		goto alloc;
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
 		goto out_unlock;
 
 	page = pmd_page(orig_pmd);
 	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
-	haddr = address & HPAGE_PMD_MASK;
 	if (page_mapcount(page) == 1) {
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
@@ -988,7 +1054,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	get_page(page);
 	spin_unlock(&mm->page_table_lock);
-
+alloc:
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow())
 		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
@@ -998,24 +1064,34 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (unlikely(!new_page)) {
 		count_vm_event(THP_FAULT_FALLBACK);
-		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
-						   pmd, orig_pmd, page, haddr);
-		if (ret & VM_FAULT_OOM)
-			split_huge_page(page);
-		put_page(page);
+		if (is_huge_zero_pmd(orig_pmd)) {
+			ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
+					address, pmd, haddr);
+		} else {
+			ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+					pmd, orig_pmd, page, haddr);
+			if (ret & VM_FAULT_OOM)
+				split_huge_page(page);
+			put_page(page);
+		}
 		goto out;
 	}
 	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
-		split_huge_page(page);
-		put_page(page);
+		if (page) {
+			split_huge_page(page);
+			put_page(page);
+		}
 		ret |= VM_FAULT_OOM;
 		goto out;
 	}
 
-	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+	if (is_huge_zero_pmd(orig_pmd))
+		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
+	else
+		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
 	__SetPageUptodate(new_page);
 
 	mmun_start = haddr;
@@ -1023,7 +1099,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
 	spin_lock(&mm->page_table_lock);
-	put_page(page);
+	if (page)
+		put_page(page);
 	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
 		spin_unlock(&mm->page_table_lock);
 		mem_cgroup_uncharge_page(new_page);
@@ -1031,14 +1108,18 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_mn;
 	} else {
 		pmd_t entry;
-		VM_BUG_ON(!PageHead(page));
 		entry = mk_huge_pmd(new_page, vma);
 		pmdp_clear_flush(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		set_pmd_at(mm, haddr, pmd, entry);
 		update_mmu_cache_pmd(vma, address, pmd);
-		page_remove_rmap(page);
-		put_page(page);
+		if (is_huge_zero_pmd(orig_pmd))
+			add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		else {
+			VM_BUG_ON(!PageHead(page));
+			page_remove_rmap(page);
+			put_page(page);
+		}
 		ret |= VM_FAULT_WRITE;
 	}
 	spin_unlock(&mm->page_table_lock);

diff --git a/mm/memory.c b/mm/memory.c

@@ -724,13 +724,6 @@ static inline int is_zero_pfn(unsigned long pfn)
 }
 #endif
 
-#ifndef my_zero_pfn
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
-	return zero_pfn;
-}
-#endif
-
 /*
  * vm_normal_page -- This function gets the "struct page" associated with a pte.
  *