From b845f313d78e4e259ec449909e3bbadf77b53a6d Mon Sep 17 00:00:00 2001
From: Dave Kleikamp
Date: Tue, 8 Jul 2008 00:28:51 +1000
Subject: [PATCH] mm: Allow architectures to define additional protection bits

This patch allows architectures to define functions to deal with
additional protection bits for mmap() and mprotect().

arch_calc_vm_prot_bits() maps additional protection bits to vm_flags
arch_vm_get_page_prot() maps additional vm_flags to the vma's vm_page_prot
arch_validate_prot() checks for valid values of the protection bits

Note: vm_get_page_prot() is now pretty ugly, but the generated code
should be identical for architectures that don't define additional
protection bits.

Signed-off-by: Dave Kleikamp
Acked-by: Andrew Morton
Acked-by: Hugh Dickins
Signed-off-by: Benjamin Herrenschmidt
---
 include/linux/mman.h |   29 ++++++++++++++++++++++++++++-
 mm/mmap.c            |    5 +++--
 mm/mprotect.c        |    2 +-
 3 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/include/linux/mman.h b/include/linux/mman.h
index dab8892e6ff..30d1073bac3 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -33,6 +33,32 @@ static inline void vm_unacct_memory(long pages)
 	vm_acct_memory(-pages);
 }
 
+/*
+ * Allow architectures to handle additional protection bits
+ */
+
+#ifndef arch_calc_vm_prot_bits
+#define arch_calc_vm_prot_bits(prot) 0
+#endif
+
+#ifndef arch_vm_get_page_prot
+#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#endif
+
+#ifndef arch_validate_prot
+/*
+ * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
+ * already been masked out.
+ *
+ * Returns true if the prot flags are valid
+ */
+static inline int arch_validate_prot(unsigned long prot)
+{
+	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
+}
+#define arch_validate_prot arch_validate_prot
+#endif
+
 /*
  * Optimisation macro.  It is equivalent to:
  *	(x & bit1) ? bit2 : 0
@@ -51,7 +77,8 @@ calc_vm_prot_bits(unsigned long prot)
 {
 	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
 	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
-	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC );
+	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
+	       arch_calc_vm_prot_bits(prot);
 }
 
 /*
diff --git a/mm/mmap.c b/mm/mmap.c
index 3354fdd83d4..1d102b956fd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -72,8 +72,9 @@ pgprot_t protection_map[16] = {
 
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-	return protection_map[vm_flags &
-				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	return __pgprot(pgprot_val(protection_map[vm_flags &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+			pgprot_val(arch_vm_get_page_prot(vm_flags)));
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a5bf31c2737..ecfaa5844b5 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -239,7 +239,7 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 	end = start + len;
 	if (end <= start)
 		return -ENOMEM;
-	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
+	if (!arch_validate_prot(prot))
 		return -EINVAL;
 
 	reqprot = prot;
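
Below is a minimal sketch (not part of the patch) of how an architecture
might plug into these hooks from its asm/mman.h, which linux/mman.h pulls
in before the #ifndef fallbacks above are evaluated. The PROT_ARCH_STRONG,
VM_ARCH_STRONG and _PAGE_ARCH_STRONG names and bit values are hypothetical
placeholders for an architecture-specific protection bit, not existing
kernel symbols.

/* arch/example/include/asm/mman.h (hypothetical) */
#ifndef _ASM_EXAMPLE_MMAN_H
#define _ASM_EXAMPLE_MMAN_H

#include <asm-generic/mman.h>

#define PROT_ARCH_STRONG	0x10		/* hypothetical extra mmap()/mprotect() bit */
#define VM_ARCH_STRONG		0x10000000	/* hypothetical vm_flags bit */
#define _PAGE_ARCH_STRONG	0x00000080	/* hypothetical PTE bit */

/* Map the extra PROT_* bit into vm_flags for calc_vm_prot_bits(). */
#define arch_calc_vm_prot_bits(prot) \
	((prot) & PROT_ARCH_STRONG ? VM_ARCH_STRONG : 0)

/* Fold the extra vm_flags bit into the vma's vm_page_prot. */
#define arch_vm_get_page_prot(vm_flags) \
	__pgprot((vm_flags) & VM_ARCH_STRONG ? _PAGE_ARCH_STRONG : 0)

/* Accept the extra bit, in addition to the generic ones, from mprotect(). */
static inline int arch_validate_prot(unsigned long prot)
{
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM |
		     PROT_ARCH_STRONG))
		return 0;
	return 1;
}
#define arch_validate_prot arch_validate_prot

#endif /* _ASM_EXAMPLE_MMAN_H */

Because each hook is guarded by #ifndef in include/linux/mman.h, an
architecture that defines nothing gets the generic behaviour, and the
generated code for such architectures is unchanged by this patch.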