mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-24 18:38:38 +00:00)
8f6aac419b
SPARSEMEM is a pretty nice framework that unifies quite a bit of code over all
the arches.  It would be great if it could be the default so that we can get
rid of various forms of DISCONTIG and other variations on memory maps.  So far
what has hindered this are the additional lookups that SPARSEMEM introduces
for virt_to_page and page_address.  Those lookups are costly enough that the
code to do them has to be kept in a separate function and cannot be inlined.

This patch introduces a virtual memmap mode for SPARSEMEM, in which the memmap
is mapped into a virtually contiguous area and only the active sections are
physically backed.  This allows virt_to_page, page_address and cohorts to
become simple shift/add operations.  No page flag fields, no table lookups,
nothing involving memory is required.

The two key operations pfn_to_page and page_to_pfn become:

    #define __pfn_to_page(pfn)      (vmemmap + (pfn))
    #define __page_to_pfn(page)     ((page) - vmemmap)

By having a virtual mapping for the memmap we allow simple access without
wasting physical memory.  As kernel memory is typically already mapped 1:1,
this introduces no additional overhead.  The virtual mapping must be big
enough to allow a struct page to be allocated and mapped for all valid
physical pages, which will make a virtual memmap difficult to use on 32 bit
platforms that support 36 address bits.

However, if there is enough virtual space available and the arch already maps
its 1:1 kernel space using TLBs (f.e. true of IA64 and x86_64) then this
technique makes SPARSEMEM lookups even more efficient than CONFIG_FLATMEM.
FLATMEM needs to read the contents of the mem_map variable to get the start of
the memmap and then add the offset to the required entry; vmemmap is a
constant to which we can simply add the offset.

This patch has the potential to allow us to make SPARSEMEM the default (and
even the only) option for most systems.  It should be optimal on UP, SMP and
NUMA on most platforms.  Then we may even be able to remove the other memory
models: FLATMEM, DISCONTIG etc.

[apw@shadowen.org: config cleanups, resplit code etc]
[kamezawa.hiroyu@jp.fujitsu.com: Fix sparsemem_vmemmap init]
[apw@shadowen.org: vmemmap: remove excess debugging]
[apw@shadowen.org: simplify initialisation code and reduce duplication]
[apw@shadowen.org: pull out the vmemmap code into its own file]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
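To make the efficiency argument above concrete, here is a minimal user-space
sketch of the two translation schemes; VMEMMAP, flat_mem_map and the fixed
base address are illustrative stand-ins for this sketch, not the kernel's
actual symbols:

#include <stdio.h>

/* Stand-in for the kernel's struct page; only its size matters here. */
struct page { unsigned long flags; };

/* vmemmap model: the base is a constant, so the translation is pure
 * pointer arithmetic with no memory access.  The address is made up. */
#define VMEMMAP ((struct page *)0x100000UL)
#define vmemmap_pfn_to_page(pfn)   (VMEMMAP + (pfn))
#define vmemmap_page_to_pfn(page)  ((unsigned long)((page) - VMEMMAP))

/* FLATMEM model: the base lives in a variable, so every translation
 * must first load flat_mem_map before it can add the offset. */
static struct page *flat_mem_map;
#define flat_pfn_to_page(pfn)      (flat_mem_map + (pfn))

int main(void)
{
	static struct page fake_map[8];
	flat_mem_map = fake_map;

	unsigned long pfn = 5;
	struct page *pg = vmemmap_pfn_to_page(pfn);

	/* Round trip through the vmemmap arithmetic: constant base only. */
	printf("vmemmap: pfn %lu -> %p -> pfn %lu\n",
	       pfn, (void *)pg, vmemmap_page_to_pfn(pg));
	printf("flatmem: pfn %lu -> %p\n",
	       pfn, (void *)flat_pfn_to_page(pfn));
	return 0;
}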
87 lines
2.2 KiB
C
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#elif defined(CONFIG_DISCONTIGMEM)

#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
#endif

#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)	\
	((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif

#endif /* CONFIG_DISCONTIGMEM */
/*
 * supports 3 memory models: FLATMEM, DISCONTIGMEM and SPARSEMEM
 * (the latter optionally with a virtually contiguous memmap).
 */
#if defined(CONFIG_FLATMEM)

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)

#define __pfn_to_page(pfn)			\
({	unsigned long __pfn = (pfn);		\
	unsigned long __nid = arch_pfn_to_nid(__pfn);	\
	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})

#define __page_to_pfn(pg)						\
({	struct page *__pg = (pg);					\
	struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));	\
	(unsigned long)(__pg - __pgdat->node_mem_map) +			\
	 __pgdat->node_start_pfn;					\
})
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous.  */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	((page) - vmemmap)
#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg)					\
({	struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
})

#define __pfn_to_page(pfn)					\
({	unsigned long __pfn = (pfn);				\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;			\
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */

#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page;
/* this is useful when inlined pfn_to_page is too big */
extern struct page *pfn_to_page(unsigned long pfn);
extern unsigned long page_to_pfn(struct page *page);
#else
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif
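As a closing illustration of the classic SPARSEMEM branch above: the header's
comment says each section stores its mem_map biased by the section's
start_pfn, so adding a full pfn to the stored pointer lands directly on the
right struct page.  A small user-space sketch of that encoding follows;
section_pages, start_pfn and the section size are local to the sketch, not
kernel API:

#include <stdio.h>

struct page { unsigned long flags; };

int main(void)
{
	/* One section covering pfns 40..47; sizes are illustrative. */
	static struct page section_pages[8];
	unsigned long start_pfn = 40;

	/* The stored base is biased downward by start_pfn, mirroring
	 * "section_mem_map == mem_map's address - start_pfn" above... */
	struct page *encoded = section_pages - start_pfn;

	/* ...so a __pfn_to_page-style lookup is just encoded + pfn. */
	unsigned long pfn = 43;
	struct page *pg = encoded + pfn;
	printf("pfn %lu -> section_pages[%ld]\n",
	       pfn, (long)(pg - section_pages));	/* prints index 3 */

	/* And the __page_to_pfn direction is the inverse subtraction. */
	printf("back to pfn %lu\n", (unsigned long)(pg - encoded));
	return 0;
}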