mirror of https://github.com/joel16/android_kernel_sony_msm8994_rework.git
mremap: properly flush TLB before releasing the page
Commit eb66ae030829605d61fbef1909ce310e29f78821 upstream.

This is a backport to stable 3.18.y, based on Will Deacon's 4.4.y backport.

Jann Horn points out that our TLB flushing was subtly wrong for the
mremap() case. What makes mremap() special is that we don't follow the
usual "add page to list of pages to be freed, then flush tlb, and then
free pages". No, mremap() obviously just _moves_ the page from one page
table location to another.

That matters, because mremap() thus doesn't directly control the
lifetime of the moved page with a freelist: instead, the lifetime of
the page is controlled by the page table locking, that serializes
access to the entry.

As a result, we need to flush the TLB not just before releasing the
lock for the source location (to avoid any concurrent accesses to the
entry), but also before we release the destination page table lock (to
avoid the TLB being flushed after somebody else has already done
something to that page).

This also makes the whole "need_flush" logic unnecessary, since we now
always end up flushing the TLB for every valid entry.

Bug: 118836219
Reported-and-tested-by: Jann Horn <jannh@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Tested-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[will: backport to 4.4 stable]
Signed-off-by: Will Deacon <will.deacon@arm.com>
[ghackmann@google.com: adjust context]
Signed-off-by: Greg Hackmann <ghackmann@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Change-Id: I653b28b6c2fd6ec00e4b0be2b3289dcab1dcc4b1
Signed-off-by: Greg Hackmann <ghackmann@google.com>
parent 22853b1ca4
commit cd9d01e8c1
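The diff below applies one rule at both the PTE level (move_ptes()) and the huge-PMD level (move_huge_pmd()): decide whether a TLB flush is needed while the entry is being moved, and perform that flush before either page table lock is dropped. As a minimal illustration of that ordering only, here is a self-contained user-space sketch; it is not kernel code, and the names (struct slot, tlb_entry, move_entry()) are invented for the example.

/* Toy model of the ordering the patch enforces: the stale cached
 * translation must be invalidated before either lock is released.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct slot {
        pthread_mutex_t lock;   /* stands in for the page table lock (PTL) */
        unsigned long entry;    /* stands in for a PTE; 0 means "none" */
};

static unsigned long tlb_entry; /* a one-entry "TLB" caching the old slot */

static void tlb_flush(void)
{
        tlb_entry = 0;          /* drop any stale cached translation */
}

/*
 * Move an entry from @old to @new, mirroring the fixed move_ptes()
 * ordering: clear the source, note whether a flush is needed, install
 * the destination, flush, and only then unlock.
 */
static void move_entry(struct slot *old, struct slot *new)
{
        bool force_flush = false;
        unsigned long val;

        pthread_mutex_lock(&old->lock);
        pthread_mutex_lock(&new->lock);

        val = old->entry;
        old->entry = 0;                 /* ptep_get_and_clear() analogue */
        if (val)                        /* pte_present() analogue */
                force_flush = true;
        new->entry = val;               /* set_pte_at() analogue */

        if (force_flush)
                tlb_flush();            /* must happen BEFORE either unlock */

        pthread_mutex_unlock(&new->lock);
        pthread_mutex_unlock(&old->lock);
}

int main(void)
{
        struct slot a = { PTHREAD_MUTEX_INITIALIZER, 42 };
        struct slot b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        tlb_entry = a.entry;            /* the "CPU" has the old mapping cached */
        move_entry(&a, &b);
        printf("dst=%lu, stale TLB entry=%lu\n", b.entry, tlb_entry);
        return 0;
}

Compile with cc -pthread; the only point is that tlb_flush() runs while both locks are still held, mirroring the flush_tlb_range() calls the patch adds before the spin_unlock() in each function.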
mm/huge_memory.c

@@ -1447,7 +1447,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 {
         int ret = 0;
         pmd_t pmd;
-
+        bool force_flush = false;
         struct mm_struct *mm = vma->vm_mm;
 
         if ((old_addr & ~HPAGE_PMD_MASK) ||
@@ -1468,8 +1468,12 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
         ret = __pmd_trans_huge_lock(old_pmd, vma);
         if (ret == 1) {
                 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+                if (pmd_present(pmd))
+                        force_flush = true;
                 VM_BUG_ON(!pmd_none(*new_pmd));
                 set_pmd_at(mm, new_addr, new_pmd, pmd);
+                if (force_flush)
+                        flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
                 spin_unlock(&mm->page_table_lock);
         }
 out:
mm/mremap.c (21 lines changed)

@@ -79,6 +79,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         struct mm_struct *mm = vma->vm_mm;
         pte_t *old_pte, *new_pte, pte;
         spinlock_t *old_ptl, *new_ptl;
+        bool force_flush = false;
+        unsigned long len = old_end - old_addr;
 
         /*
          * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
@@ -125,11 +127,25 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                 if (pte_none(*old_pte))
                         continue;
                 pte = ptep_get_and_clear(mm, old_addr, old_pte);
+                /*
+                 * If we are remapping a valid PTE, make sure
+                 * to flush TLB before we drop the PTL for the PTE.
+                 *
+                 * NOTE! Both old and new PTL matter: the old one
+                 * for racing with page_mkclean(), the new one to
+                 * make sure the physical page stays valid until
+                 * the TLB entry for the old mapping has been
+                 * flushed.
+                 */
+                if (pte_present(pte))
+                        force_flush = true;
                 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                 set_pte_at(mm, new_addr, new_pte, pte);
         }
 
         arch_leave_lazy_mmu_mode();
+        if (force_flush)
+                flush_tlb_range(vma, old_end - len, old_end);
         if (new_ptl != old_ptl)
                 spin_unlock(new_ptl);
         pte_unmap(new_pte - 1);
@@ -149,7 +165,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
         unsigned long extent, next, old_end;
         pmd_t *old_pmd, *new_pmd;
-        bool need_flush = false;
         unsigned long mmun_start;       /* For mmu_notifiers */
         unsigned long mmun_end;         /* For mmu_notifiers */
 
@@ -187,7 +202,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                         anon_vma_unlock_write(vma->anon_vma);
                 }
                 if (err > 0) {
-                        need_flush = true;
                         continue;
                 } else if (!err) {
                         split_huge_page_pmd(vma, old_addr, old_pmd);
@@ -204,10 +218,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                         extent = LATENCY_LIMIT;
                 move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                           new_vma, new_pmd, new_addr, need_rmap_locks);
-                need_flush = true;
         }
-        if (likely(need_flush))
-                flush_tlb_range(vma, old_end-len, old_addr);
 
         mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 