MN10300: Make the use of PIDR to mark TLB entries controllable
Make the use of the PIDR register to mark TLB entries as belonging to
particular processes controllable via a new Kconfig option
(MN10300_TLB_USE_PIDR).

Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
parent 492e675116
commit a9bc60ebfd
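
In outline, the change adds a CONFIG_MN10300_TLB_USE_PIDR option (default y). With the option enabled, each mm keeps an 8-bit TLB PID that activate_context() loads into the PIDR register, so the CPU can keep TLB entries from different processes apart; with it disabled, the PID paths compile out and changing address spaces falls back to flushing the local TLB. The sketch below is condensed from the asm/mmu_context.h hunk in this diff; it is an illustration of the gating, not a standalone compilable file.

#ifdef CONFIG_MN10300_TLB_USE_PIDR
/* Tag subsequently loaded TLB entries with this mm's 8-bit PID. */
static inline void activate_context(struct mm_struct *mm)
{
	PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
}
#else
/* No PID tagging: just flush the local TLB whenever the mm changes. */
#define activate_context(mm)	local_flush_tlb()
#endif

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next) {
		PTBR = (unsigned long) next->pgd;	/* point the MMU at the new page tables */
		activate_context(next);			/* retag (or flush) the TLB */
	}
}

The rest of the diff applies the same pattern: the PID-specific code in tlbflush.h and mmu-context.c is wrapped in #ifdef CONFIG_MN10300_TLB_USE_PIDR, with a plain TLB flush as the fallback when the option is off.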
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -142,6 +142,9 @@ config FPU
 
 source "arch/mn10300/mm/Kconfig.cache"
 
+config MN10300_TLB_USE_PIDR
+	def_bool y
+
 menu "Memory layout options"
 
 config KERNEL_RAM_BASE_ADDRESS
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -27,28 +27,22 @@
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
+#define MMU_CONTEXT_TLBPID_NR		256
 #define MMU_CONTEXT_TLBPID_MASK		0x000000ffUL
 #define MMU_CONTEXT_VERSION_MASK	0xffffff00UL
 #define MMU_CONTEXT_FIRST_VERSION	0x00000100UL
 #define MMU_NO_CONTEXT			0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm)	(mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR	0
 
 #define enter_lazy_tlb(mm, tsk)	do {} while (0)
 
-#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
-	cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
-	cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
-#else
-#define cpu_ran_vm(cpu, mm)		do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm)	true
-#endif /* CONFIG_SMP */
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm)	(mm->context.tlbpid[smp_processor_id()])
 
-/*
- * allocate an MMU context
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
  */
 static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 {
@@ -100,35 +94,42 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
-/*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm)	do { } while (0)
-
 /*
  * after we have set current->mm to a new value, this activates the context for
  * the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
 {
 	PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
 }
 
-/*
- * change between virtual memory sets
- */
+#else  /* CONFIG_MN10300_TLB_USE_PIDR */
+
+#define init_new_context(tsk, mm)	(0)
+#define activate_context(mm)		local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm)	do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
+ */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	int cpu = smp_processor_id();
-
 	if (prev != next) {
-		cpu_ran_vm(cpu, next);
-		activate_context(next, cpu);
 		PTBR = (unsigned long) next->pgd;
-	} else if (!cpu_maybe_ran_vm(cpu, next)) {
-		activate_context(next, cpu);
+		activate_context(next);
 	}
 }
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -13,6 +13,12 @@
 
 #include <asm/processor.h>
 
+struct tlb_state {
+	struct mm_struct	*active_mm;
+	int			state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
 /**
  * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
  */
@@ -31,20 +37,51 @@ static inline void local_flush_tlb(void)
 /**
  * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
  */
-#define local_flush_tlb_all()		local_flush_tlb()
+static inline void local_flush_tlb_all(void)
+{
+	local_flush_tlb();
+}
 
 /**
  * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
  */
-#define local_flush_tlb_one(addr)	local_flush_tlb()
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+	local_flush_tlb();
+}
 
 /**
  * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
  * @mm: The MM to flush for
  * @addr: The address of the target page in RAM (not its page struct)
  */
-extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+	unsigned long pteu, flags, cnx;
+
+	addr &= PAGE_MASK;
+
+	local_irq_save(flags);
+
+	cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+	cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+	if (cnx) {
+		pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+		pteu |= cnx & xPTEU_PID;
+#endif
+		IPTEU = pteu;
+		DPTEU = pteu;
+		if (IPTEL & xPTEL_V)
+			IPTEL = 0;
+		if (DPTEL & xPTEL_V)
+			DPTEL = 0;
+	}
+	local_irq_restore(flags);
+}
 
 /*
  * TLB flushing:
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -13,40 +13,15 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
 /*
  * list of the MMU contexts last allocated on each CPU
  */
 unsigned long mmu_context_cache[NR_CPUS] = {
-	[0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+	[0 ... NR_CPUS - 1] =
+	MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
 };
-
-/*
- * flush the specified TLB entry
- */
-void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
-{
-	unsigned long pteu, cnx, flags;
-
-	addr &= PAGE_MASK;
-
-	/* make sure the context doesn't migrate and defend against
-	 * interference from vmalloc'd regions */
-	local_irq_save(flags);
-
-	cnx = mm_context(mm);
-
-	if (cnx != MMU_NO_CONTEXT) {
-		pteu = addr | (cnx & 0x000000ffUL);
-		IPTEU = pteu;
-		DPTEU = pteu;
-		if (IPTEL & xPTEL_V)
-			IPTEL = 0;
-		if (DPTEL & xPTEL_V)
-			DPTEL = 0;
-	}
-
-	local_irq_restore(flags);
-}
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
 
 /*
  * preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
 	 * interference from vmalloc'd regions */
 	local_irq_save(flags);
 
+	cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
 	cnx = mm_context(vma->vm_mm);
+#endif
 
 	if (cnx != MMU_NO_CONTEXT) {
-		pteu = addr | (cnx & 0x000000ffUL);
+		pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+		pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
 		if (!(pte_val(pte) & _PAGE_NX)) {
 			IPTEU = pteu;
 			if (IPTEL & xPTEL_V)