Mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-28 04:17:47 +00:00)

Commit 7b4b2a0d6c
Commit "mm: introduce new field 'managed_pages' to struct zone" assumes that all highmem pages will be freed into the buddy system by function mem_init(). But that's not always true, some architectures may reserve some highmem pages during boot. For example PPC may allocate highmem pages for giagant HugeTLB pages, and several architectures have code to check PageReserved flag to exclude highmem pages allocated during boot when freeing highmem pages into the buddy system. So treat highmem pages in the same way as normal pages, that is to: 1) reset zone->managed_pages to zero in mem_init(). 2) recalculate managed_pages when freeing pages into the buddy system. Signed-off-by: Jiang Liu <jiang.liu@huawei.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Tejun Heo <tj@kernel.org> Cc: Joonsoo Kim <js1304@gmail.com> Cc: Yinghai Lu <yinghai@kernel.org> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Minchan Kim <minchan@kernel.org> Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Marek Szyprowski <m.szyprowski@samsung.com> Cc: "Michael S. Tsirkin" <mst@redhat.com> Cc: <sworddragon2@aol.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: David Howells <dhowells@redhat.com> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jeremy Fitzhardinge <jeremy@goop.org> Cc: Jianguo Wu <wujianguo@huawei.com> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: Michel Lespinasse <walken@google.com> Cc: Rik van Riel <riel@redhat.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Tang Chen <tangchen@cn.fujitsu.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Wen Congyang <wency@cn.fujitsu.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> Cc: Russell King <rmk@arm.linux.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
147 lines
3.5 KiB
C
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
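
/*
 * Illustrative usage sketch (added for exposition; not part of the original
 * file): the kmap_atomic()/kunmap_atomic() pair above is meant for short,
 * non-sleeping sections, as the comment before kmap_atomic_prot() explains.
 * The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_zero_highpage(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* disables pagefaults; may not sleep */

	memset(vaddr, 0, PAGE_SIZE);		/* keep the mapped section short */
	kunmap_atomic(vaddr);			/* unmap before doing anything that sleeps */
}
#endif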

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
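
/*
 * Illustrative usage sketch (added for exposition; not part of the original
 * file): kmap_atomic_pfn() follows the same pattern as kmap_atomic(), but
 * takes a raw pfn for memory that has no struct page. The helper below is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_copy_from_pfn(unsigned long pfn, void *dst, size_t len)
{
	void *vaddr = kmap_atomic_pfn(pfn);	/* no struct page required */

	memcpy(dst, vaddr, min_t(size_t, len, PAGE_SIZE));
	kunmap_atomic(vaddr);
}
#endif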

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it. Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before free_all_bootmem()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}