mirror of
https://github.com/FEX-Emu/linux.git
synced 2024-12-14 21:01:29 +00:00
fde740e4dd
This patch introduces using the quicklists for pgd, pmd, and pte levels by combining the alloc and free functions into a common set of routines. This greatly simplifies the reading of this header file. This patch is simple but necessary for large numa configurations. It simply ensures that only pages from the local node are added to a cpu's quicklist. This prevents the trapping of pages on a remote node's quicklist by starting a process, touching a large number of pages to fill pmd and pte entries, migrating to another node, and then unmapping or exiting. With those conditions, the pages get trapped and if the machine has more than 100 nodes of the same size, the calculation of the pgtable high water mark will be larger than any single node so page table cache flushing will never occur. I ran lmbench lat_proc fork and lat_proc exec on a zx1 with and without this patch and did not notice any change. On an sn2 machine, there was a slight improvement which is possibly due to pages from other nodes trapped on the test node before starting the run. I did not investigate further. This patch shrinks the quicklist based upon free memory on the node instead of the high/low water marks. I have written it to enable preemption periodically and recalculate the amount to shrink every time we have freed enough pages that the quicklist size should have grown. I rescan the nodes' zones each pass because other processes may be draining node memory at the same time as we are adding. Signed-off-by: Robin Holt <holt@sgi.com> Signed-off-by: Tony Luck <tony.luck@intel.com>
146 lines
3.1 KiB
C
146 lines
3.1 KiB
C
#ifndef _ASM_IA64_PGALLOC_H
|
|
#define _ASM_IA64_PGALLOC_H
|
|
|
|
/*
|
|
* This file contains the functions and defines necessary to allocate
|
|
* page tables.
|
|
*
|
|
* This hopefully works with any (fixed) ia-64 page-size, as defined
|
|
* in <asm/page.h> (currently 8192).
|
|
*
|
|
* Copyright (C) 1998-2001 Hewlett-Packard Co
|
|
* David Mosberger-Tang <davidm@hpl.hp.com>
|
|
* Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
|
|
*/
|
|
|
|
#include <linux/config.h>
|
|
|
|
#include <linux/compiler.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/page-flags.h>
|
|
#include <linux/threads.h>
|
|
|
|
#include <asm/mmu_context.h>
|
|
|
|
DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
|
|
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
|
|
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
|
|
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
|
|
|
|
static inline long pgtable_quicklist_total_size(void)
|
|
{
|
|
long ql_size;
|
|
int cpuid;
|
|
|
|
for_each_online_cpu(cpuid) {
|
|
ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
|
|
}
|
|
return ql_size;
|
|
}
|
|
|
|
static inline void *pgtable_quicklist_alloc(void)
|
|
{
|
|
unsigned long *ret = NULL;
|
|
|
|
preempt_disable();
|
|
|
|
ret = pgtable_quicklist;
|
|
if (likely(ret != NULL)) {
|
|
pgtable_quicklist = (unsigned long *)(*ret);
|
|
ret[0] = 0;
|
|
--pgtable_quicklist_size;
|
|
} else {
|
|
ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
|
|
}
|
|
|
|
preempt_enable();
|
|
|
|
return ret;
|
|
}
|
|
|
|
static inline void pgtable_quicklist_free(void *pgtable_entry)
|
|
{
|
|
#ifdef CONFIG_NUMA
|
|
unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
|
|
|
|
if (unlikely(nid != numa_node_id())) {
|
|
free_page((unsigned long)pgtable_entry);
|
|
return;
|
|
}
|
|
#endif
|
|
|
|
preempt_disable();
|
|
*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
|
|
pgtable_quicklist = (unsigned long *)pgtable_entry;
|
|
++pgtable_quicklist_size;
|
|
preempt_enable();
|
|
}
|
|
|
|
/* Allocate a zeroed top-level page directory from the quicklist. */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = pgtable_quicklist_alloc();

	return pgd;
}
|
|
|
|
/* Recycle a page directory back onto the per-cpu quicklist. */
static inline void pgd_free(pgd_t * pgd)
{
	pgtable_quicklist_free((void *)pgd);
}
|
|
|
|
static inline void
|
|
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
|
|
{
|
|
pud_val(*pud_entry) = __pa(pmd);
|
|
}
|
|
|
|
/* Allocate a zeroed pmd page from the quicklist. */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = pgtable_quicklist_alloc();

	return pmd;
}
|
|
|
|
/* Recycle a pmd page back onto the per-cpu quicklist. */
static inline void pmd_free(pmd_t * pmd)
{
	pgtable_quicklist_free((void *)pmd);
}
|
|
|
|
/* Free a pmd page during mmu_gather teardown; just recycles via pmd_free(). */
#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
|
|
|
|
static inline void
|
|
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
|
|
{
|
|
pmd_val(*pmd_entry) = page_to_phys(pte);
|
|
}
|
|
|
|
static inline void
|
|
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
|
|
{
|
|
pmd_val(*pmd_entry) = __pa(pte);
|
|
}
|
|
|
|
static inline struct page *pte_alloc_one(struct mm_struct *mm,
|
|
unsigned long addr)
|
|
{
|
|
return virt_to_page(pgtable_quicklist_alloc());
|
|
}
|
|
|
|
/* Allocate a zeroed pte page for a kernel mapping (virtual address form). */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	pte_t *pte = pgtable_quicklist_alloc();

	return pte;
}
|
|
|
|
/* Recycle a pte page (given as a struct page) onto the quicklist. */
static inline void pte_free(struct page *pte)
{
	void *va = page_address(pte);

	pgtable_quicklist_free(va);
}
|
|
|
|
/* Recycle a kernel pte page (given by virtual address) onto the quicklist. */
static inline void pte_free_kernel(pte_t * pte)
{
	pgtable_quicklist_free((void *)pte);
}
|
|
|
|
/* Free a pte page during mmu_gather teardown; just recycles via pte_free(). */
#define __pte_free_tlb(tlb, pte) pte_free(pte)
|
|
|
|
extern void check_pgt_cache(void);
|
|
|
|
#endif /* _ASM_IA64_PGALLOC_H */
|