mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-14 21:01:29 +00:00)
cdd6c482c9
perf: Do the big rename: Performance Counters -> Performance Events

Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.

Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and less
appropriate.

All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables and
API names. (in an ABI compatible fashion)

The word 'event' is also a bit shorter than 'counter' - which makes it
slightly more convenient to write/handle as well.

Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.

User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be used by
anyone who has pending perfcounters patches - it converts a Linux
kernel tree over to the new naming. We tried to time this change to
the point in time where the amount of pending patches is the smallest:
the end of the merge window.

Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.

( NOTE: 'counters' are still the proper terminology when we deal with
  hardware registers - and these sed scripts are a bit over-eager in
  renaming them. I've undone some of that, but in case there's something
  left where 'counter' would be better than 'event' we can undo that on
  an individual basis instead of touching an otherwise nicely automated
  patch. )

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
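
As a concrete illustration of the rename rules (examples reconstructed
from the sed expressions above, not quoted from the patch itself):

  s/PERF_EVENT_/PERF_RECORD_/g   PERF_EVENT_MMAP         -> PERF_RECORD_MMAP
  s/PERF_COUNTER/PERF_EVENT/g    PERF_COUNTER_IOC_ENABLE -> PERF_EVENT_IOC_ENABLE
  s/perf_counter/perf_event/g    struct perf_counter     -> struct perf_event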
321 lines
7.7 KiB
C
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        return newprot;
}
#endif
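
/*
 * Bottom level of the protection-change walk: rewrite every PTE mapped
 * under one pmd entry to the new protection.  Present PTEs go through
 * the ptep_modify_prot_start()/ptep_modify_prot_commit() pair so the
 * update does not race with the hardware setting the dirty/accessed
 * bits; write migration entries are downgraded to read-only ones.
 */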
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pte_t *pte, oldpte;
        spinlock_t *ptl;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;

                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);

                        /*
                         * Avoid taking write faults for pages we know to be
                         * dirty.
                         */
                        if (dirty_accountable && pte_dirty(ptent))
                                ptent = pte_mkwrite(ptent);

                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                } else if (PAGE_MIGRATION && !pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                set_pte_at(mm, addr, pte,
                                        swp_entry_to_pte(entry));
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
}
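
/*
 * Walk the pmd entries under one pud, skipping holes, and hand each
 * populated pmd to change_pte_range().
 */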
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
        } while (pmd++, addr = next, addr != end);
}
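
/*
 * Same pattern one level up: walk the pud entries under one pgd and
 * recurse into change_pmd_range().
 */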
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
        } while (pud++, addr = next, addr != end);
}
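
/*
 * Top of the walk: flush the cache range, rewrite the protection bits
 * of every page mapped in [addr, end) starting from the pgd, then
 * flush the TLB for the range so stale translations disappear.
 */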
static void change_protection(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_range(vma, start, end);
}
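
/*
 * Apply newflags to [start, end) inside one vma: charge commit for a
 * private mapping that becomes writable, try to merge with the
 * neighbouring vmas, split the vma when only part of it is affected,
 * and finally rewrite the page tables (hugetlb vmas take their own
 * path).  The caller must hold mmap_sem for writing.
 */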
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again.  hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory(charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
                                          vm_get_page_prot(newflags));

        if (vma_wants_writenotify(vma)) {
                vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
                dirty_accountable = 1;
        }

        mmu_notifier_invalidate_range_start(mm, start, end);
        if (is_vm_hugetlb_page(vma))
                hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
        else
                change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
        mmu_notifier_invalidate_range_end(mm, start, end);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}
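
/*
 * The mprotect(2) entry point: validate and page-align the arguments,
 * honour READ_IMPLIES_EXEC personalities, then walk all vmas covering
 * [start, start+len) and apply mprotect_fixup() to each piece in turn,
 * stopping at the first hole or error.
 */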
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        unsigned long vm_flags, nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC:
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        vm_flags = calc_vm_prot_bits(prot);

        down_write(&current->mm->mmap_sem);

        vma = find_vma_prev(current->mm, start, &prev);
        error = -ENOMEM;
        if (!vma)
                goto out;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        }
        else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shift VM_MAY% in place of VM_% */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                perf_event_mmap(vma);
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}
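
For reference, a minimal user-space exercise of this syscall (an
illustrative sketch, not part of mm/mprotect.c): it maps one anonymous
read/write page, then uses mprotect(2) to drop write permission.

  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          long page = sysconf(_SC_PAGESIZE);
          /* mprotect() requires a page-aligned start address;
           * mmap() returns one. */
          char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (p == MAP_FAILED)
                  return 1;
          strcpy(p, "hello");

          /* Revoke write access: a later store through p would now
           * fault with SIGSEGV, but reads keep working. */
          if (mprotect(p, page, PROT_READ) != 0)
                  return 1;

          printf("%s\n", p);
          munmap(p, page);
          return 0;
  }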