/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#ifndef __UM_PAGE_H
#define __UM_PAGE_H

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#ifndef __ASSEMBLY__

struct page;

#include <linux/types.h>
#include <asm/vm-flags.h>

/*
 * These are used to make use of C type-checking..
 */

#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)

/*
 * 32-bit UML with 3-level page tables: a pte holds a value wider than
 * one machine word, so it is split into low/high halves and accessed
 * via the pte_*() helpers below.
 */
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))

#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
/*
 * Write the high half first and order it before the low half so a
 * concurrent reader never sees a new low word paired with a stale
 * high word.
 */
#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
			      smp_wmb(); \
			      (to).pte_low = (from).pte_low; })
#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
#define pte_set_val(pte, phys, prot) \
	({ (pte).pte_high = (phys) >> 32; \
	   (pte).pte_low = (phys) | pgprot_val(prot); })

#define pmd_val(x)	((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )

typedef unsigned long long pfn_t;
typedef unsigned long long phys_t;

#else

/* Single-word ptes: either 64-bit, or 32-bit with 2-level page tables */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;

#ifdef CONFIG_3_LEVEL_PGTABLES
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
#endif

#define pte_val(x)	((x).pte)

#define pte_get_bits(p, bits) ((p).pte & (bits))
#define pte_set_bits(p, bits) ((p).pte |= (bits))
#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
#define pte_copy(to, from) ((to).pte = (from).pte)
#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))

typedef unsigned long pfn_t;
typedef unsigned long phys_t;

#endif

typedef struct { unsigned long pgprot; } pgprot_t;

typedef struct page *pgtable_t;

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

extern unsigned long uml_physmem;

#define PAGE_OFFSET (uml_physmem)
#define KERNELBASE PAGE_OFFSET

#define __va_space (8*1024*1024)

#include <mem.h>

/* Cast to unsigned long before casting to void * to avoid a warning from
 * mmap_kmem about cutting a long long down to a void *. Not sure that
 * casting is the right thing, but 32-bit UML can't have 64-bit virtual
 * addresses
 */
#define __pa(virt) to_phys((void *) (unsigned long) (virt))
#define __va(phys) to_virt((unsigned long) (phys))

#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))

#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif	/* __ASSEMBLY__ */

/* 32-bit UML provides its own gate area implementation */
#ifdef CONFIG_X86_32
#define __HAVE_ARCH_GATE_AREA 1
#endif

#endif	/* __UM_PAGE_H */