Mirror of https://github.com/joel16/android_kernel_sony_msm8994_rework.git
synced 2024-11-23 11:59:58 +00:00
cd9d01e8c1
Commit eb66ae030829605d61fbef1909ce310e29f78821 upstream.

This is a backport to stable 3.18.y, based on Will Deacon's 4.4.y backport.

Jann Horn points out that our TLB flushing was subtly wrong for the
mremap() case. What makes mremap() special is that we don't follow the
usual "add page to list of pages to be freed, then flush tlb, and then
free pages". No, mremap() obviously just _moves_ the page from one page
table location to another.

That matters, because mremap() thus doesn't directly control the
lifetime of the moved page with a freelist: instead, the lifetime of
the page is controlled by the page table locking, that serializes
access to the entry.

As a result, we need to flush the TLB not just before releasing the
lock for the source location (to avoid any concurrent accesses to the
entry), but also before we release the destination page table lock (to
avoid the TLB being flushed after somebody else has already done
something to that page).

This also makes the whole "need_flush" logic unnecessary, since we now
always end up flushing the TLB for every valid entry.

Bug: 118836219
Reported-and-tested-by: Jann Horn <jannh@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Tested-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[will: backport to 4.4 stable]
Signed-off-by: Will Deacon <will.deacon@arm.com>
[ghackmann@google.com: adjust context]
Signed-off-by: Greg Hackmann <ghackmann@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Change-Id: I653b28b6c2fd6ec00e4b0be2b3289dcab1dcc4b1
Signed-off-by: Greg Hackmann <ghackmann@google.com>
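For orientation before the full file: the hunk below is abridged verbatim from move_ptes() as it appears further down, and shows the ordering the commit message describes, i.e. flush_tlb_range() runs before either the destination or the source page table lock is released. Nothing in it is new code; it is only an excerpt of the file that follows.

	/* abridged from move_ptes() below */
	pte = ptep_get_and_clear(mm, old_addr, old_pte);
	if (pte_present(pte))
		force_flush = true;	/* a valid PTE was moved */
	pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
	set_pte_at(mm, new_addr, new_pte, pte);
	...
	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);			/* destination PTL */
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);		/* source PTL */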
578 lines
15 KiB
C
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			mutex_lock(&mapping->i_mmap_mutex);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
}

#define LATENCY_LIMIT	(64 * PAGE_SIZE)

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE) {
				VM_BUG_ON(vma->vm_file || !vma->anon_vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
			}
			if (err > 0) {
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	down_write(&current->mm->mmap_sem);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len,
					&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}