cdfd4325c0
With Mel's hugetlb private reservation support patches applied, strict overcommit semantics are applied to both shared and private huge page mappings. This can be a problem if an application relied on unlimited overcommit semantics for private mappings. An example of this would be an application which maps a huge area with the intention of using it very sparsely. These applications would benefit from being able to opt out of the strict overcommit. It should be noted that prior to hugetlb supporting demand faulting all mappings were fully populated, so applications of this type should be rare.

This patch stack implements the MAP_NORESERVE mmap() flag for huge page mappings. This flag has the same meaning as for small page mappings: it suppresses reservations for that mapping.

Thanks to Mel Gorman for reviewing a number of early versions of these patches.

This patch:

When a small page mapping is created with mmap(), reservations are created by default for any memory pages required. When the region is read/write the reservation is increased for every page; no reservation is needed for read-only regions (as they implicitly share the zero page). Reservations are tracked via the VM_ACCOUNT vma flag, which is present when the region has a reservation backing it. When we convert a region from read-only to read/write, new reservations are acquired and VM_ACCOUNT is set. However, a read-only map created with MAP_NORESERVE is indistinguishable from a normal mapping, so when we later convert it to read/write we are forced to incorrectly create reservations for it, as we have no record of the original MAP_NORESERVE.

This patch introduces a new vma flag, VM_NORESERVE, which records the presence of the original MAP_NORESERVE flag. This allows us to distinguish the two cases and account the reserve correctly. As well as fixing this FIXME in the code, it makes it much easier to introduce MAP_NORESERVE support for huge pages, as the flag is available consistently for the life of the mapping; VM_ACCOUNT, on the other hand, is heavily used at the generic level in association with small pages.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Johannes Weiner <hannes@saeurebad.de>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Michael Kerrisk <mtk.manpages@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
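For illustration, here is a minimal userspace sketch of the situation the commit message describes: a large, sparsely used private mapping created read-only with MAP_NORESERVE and later made writable with mprotect(). Before this patch the kernel had no record of the MAP_NORESERVE request at mprotect() time and would charge the whole region against the overcommit limit; with VM_NORESERVE recorded on the vma it can skip that accounting. The 1 GiB size and single-page touch below are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 1UL << 30; /* large area, used very sparsely */

        /* Read-only, with reservations suppressed. */
        void *p = mmap(NULL, len, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Later the application decides it needs to write to the area. */
        if (mprotect(p, len, PROT_READ | PROT_WRITE) != 0) {
                perror("mprotect");
                return 1;
        }

        memset(p, 0, 4096); /* touch only a single page */
        munmap(p, len);
        return 0;
}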
317 lines
7.3 KiB
C
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(mm, addr, pte, ptent);
#ifdef CONFIG_MIGRATION
		} else if (!pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
#endif
		}

	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
	} while (pud++, addr = next, addr != end);
}

static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
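The piece that actually records MAP_NORESERVE on the vma lives in the mmap() path rather than in this file. As a rough, hedged sketch of the kind of hunk the commit message implies for mm/mmap.c (the surrounding context, do_mmap_pgoff() with its flags and vm_flags variables, is an assumption here and not taken from this listing):

	/*
	 * Remember that the caller asked for MAP_NORESERVE so that
	 * later operations, such as the overcommit check added to
	 * mprotect_fixup() above, can tell this mapping apart from
	 * an ordinary read-only one.
	 */
	if (flags & MAP_NORESERVE)
		vm_flags |= VM_NORESERVE;

With that flag recorded, the (VM_ACCOUNT|VM_WRITE|VM_SHARED|VM_NORESERVE) test in mprotect_fixup() skips charging a MAP_NORESERVE mapping when it is later made writable, which is exactly the case the commit message describes.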