thp: mprotect: transparent huge page support
Natively handle huge pmds when changing page tables on behalf of mprotect().

I left out update_mmu_cache() because we do not need it on x86 anyway but more importantly the interface works on ptes, not pmds.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b36f5b0710
commit cd7548ab36
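Before the diff itself, a userspace sketch of the path this commit optimizes may help (my illustration, not part of the commit; the 2 MiB HPAGE_SIZE and MADV_HUGEPAGE assume x86_64 with CONFIG_TRANSPARENT_HUGEPAGE). It maps an anonymous region, asks for a transparent huge page, then calls mprotect() over exactly one pmd-sized, pmd-aligned range, which after this patch is handled natively by change_huge_pmd() instead of by splitting the huge page:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL << 20)	/* assumed x86_64 HPAGE_PMD_SIZE */

int main(void)
{
	/* Over-allocate so a 2 MiB-aligned start can be carved out. */
	size_t len = 4 * HPAGE_SIZE;
	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	char *huge = (char *)(((uintptr_t)map + HPAGE_SIZE - 1) &
			      ~(uintptr_t)(HPAGE_SIZE - 1));

	/* Ask the kernel to back this range with transparent huge pages. */
	if (madvise(huge, 2 * HPAGE_SIZE, MADV_HUGEPAGE))
		perror("madvise");	/* needs CONFIG_TRANSPARENT_HUGEPAGE */

	memset(huge, 0xaa, 2 * HPAGE_SIZE);	/* fault the memory in */

	/*
	 * Exactly one pmd-sized, pmd-aligned range: with this commit the
	 * kernel can rewrite the single pmd in place instead of first
	 * splitting the huge page into 512 ptes.
	 */
	if (mprotect(huge, HPAGE_SIZE, PROT_READ)) {
		perror("mprotect");
		return 1;
	}

	puts("mprotect over one huge-page-sized range succeeded");
	munmap(map, len);
	return 0;
}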
include/linux/huge_mm.h
@@ -22,6 +22,8 @@ extern int zap_huge_pmd(struct mmu_gather *tlb,
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, unsigned long end,
 			unsigned char *vec);
+extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, pgprot_t newprot);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
mm/huge_memory.c
@@ -948,6 +948,33 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	return ret;
 }
 
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, pgprot_t newprot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			pmd_t entry;
+
+			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmd_modify(entry, newprot);
+			set_pmd_at(mm, addr, pmd, entry);
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
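For readability, here is the new change_huge_pmd() again with explanatory comments; the comments are mine and are not part of the commit:

int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 0;

	/* The pmd may only be inspected or modified under the lock. */
	spin_lock(&mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			/*
			 * Another thread is splitting this huge page into
			 * regular ptes: drop the lock, wait for the split
			 * to finish, and return 0 so the caller falls back
			 * to the pte path.
			 */
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			pmd_t entry;

			/*
			 * Clear the pmd first so concurrent hardware
			 * accessed/dirty bit updates cannot be lost, then
			 * reinstall it with the new protections.
			 */
			entry = pmdp_get_and_clear(mm, addr, pmd);
			entry = pmd_modify(entry, newprot);
			set_pmd_at(mm, addr, pmd, entry);
			spin_unlock(&vma->vm_mm->page_table_lock);
			/* Stale translations may be cached: flush the range. */
			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
			ret = 1;	/* handled natively, no split needed */
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}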
mm/mprotect.c
@@ -88,7 +88,13 @@ static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			if (next - addr != HPAGE_PMD_SIZE)
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			else if (change_huge_pmd(vma, pmd, addr, newprot))
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
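The key test above is next - addr != HPAGE_PMD_SIZE: the native path is taken only when the iteration covers one whole, aligned pmd; any partial overlap splits the huge page and falls through to the existing pte loop. Below is a standalone sketch of that test (my illustration; the 2 MiB HPAGE_PMD_SIZE and the simplified pmd_addr_end() are assumptions, not the kernel's exact code):

#include <stdio.h>

#define HPAGE_PMD_SIZE (2UL << 20)	/* assumed x86_64 value */
#define PMD_MASK (~(HPAGE_PMD_SIZE - 1))

/* Mimics pmd_addr_end(): the range end, clamped to the next pmd boundary. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + HPAGE_PMD_SIZE) & PMD_MASK;
	return boundary < end ? boundary : end;
}

int main(void)
{
	/* Whole aligned pmd: change_huge_pmd() can be used. */
	unsigned long addr = 0x200000, end = 0x400000;
	unsigned long next = pmd_addr_end(addr, end);
	printf("whole pmd? %s\n",
	       next - addr == HPAGE_PMD_SIZE ? "yes" : "no");	/* yes */

	/* Half a huge page: the huge page must be split first. */
	end = 0x300000;
	next = pmd_addr_end(addr, end);
	printf("whole pmd? %s\n",
	       next - addr == HPAGE_PMD_SIZE ? "yes" : "no");	/* no */
	return 0;
}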