powerpc/mm: Convert virtual address to vpn

This patch converts the various functions to take a virtual page number
(vpn) instead of a virtual address. The virtual page number is the
virtual address shifted right by VPN_SHIFT (12) bits, so a 64-bit vpn
together with the 12 dropped offset bits covers an address range of up
to 76 bits.
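
As an illustration (not part of the patch), the conversion for a 256M
segment, using the usual constants SID_SHIFT = 28 and VPN_SHIFT = 12;
the helper name here is ours, not the kernel's:

    #include <stdint.h>

    #define VPN_SHIFT 12
    #define SID_SHIFT 28    /* 256M segment */

    /* vpn is the 78-bit va shifted right by VPN_SHIFT bits */
    static inline uint64_t va_to_vpn(uint64_t vsid, uint64_t ea)
    {
            uint64_t seg_off = ea & ((1ull << SID_SHIFT) - 1);
            /* equals ((vsid << SID_SHIFT) | seg_off) >> VPN_SHIFT,
             * computed without ever forming the full va
             */
            return (vsid << (SID_SHIFT - VPN_SHIFT)) | (seg_off >> VPN_SHIFT);
    }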

Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Aneesh Kumar K.V 2012-09-10 02:52:50 +00:00 committed by Benjamin Herrenschmidt
parent dcda287a9b
commit 5524a27d39
16 changed files with 324 additions and 240 deletions

arch/powerpc/include/asm/kvm_host.h

@ -59,7 +59,7 @@ struct hpte_cache {
struct hlist_node list_vpte;
struct hlist_node list_vpte_long;
struct rcu_head rcu_head;
u64 host_va;
u64 host_vpn;
u64 pfn;
ulong slot;
struct kvmppc_pte pte;

arch/powerpc/include/asm/machdep.h

@ -34,19 +34,19 @@ struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
void (*hpte_invalidate)(unsigned long slot,
unsigned long va,
unsigned long vpn,
int psize, int ssize,
int local);
long (*hpte_updatepp)(unsigned long slot,
unsigned long newpp,
unsigned long va,
unsigned long vpn,
int psize, int ssize,
int local);
void (*hpte_updateboltedpp)(unsigned long newpp,
unsigned long ea,
int psize, int ssize);
long (*hpte_insert)(unsigned long hpte_group,
unsigned long va,
unsigned long vpn,
unsigned long prpn,
unsigned long rflags,
unsigned long vflags,

arch/powerpc/include/asm/mmu-hash64.h

@ -154,9 +154,25 @@ struct mmu_psize_def
#define MMU_SEGSIZE_256M 0
#define MMU_SEGSIZE_1T 1
/*
 * Encode page number shift.
 * In order to fit the 78-bit va in a 64-bit variable we shift the va by
 * 12 bits. This enables us to address up to a 76-bit va (64 + 12 bits).
 * For the hpt hash from a va we can ignore the page size bits of the va,
 * and for hpte encoding we ignore up to 23 bits of the va. So ignoring the
 * lower 12 bits ensures we work in all cases, including 4k page size.
 */
#define VPN_SHIFT 12
#ifndef __ASSEMBLY__
static inline int segment_shift(int ssize)
{
if (ssize == MMU_SEGSIZE_256M)
return SID_SHIFT;
return SID_SHIFT_1T;
}
/*
* The current system page and segment sizes
*/
@ -179,19 +195,40 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
*/
extern int mmu_ci_restrictions;
/*
* This computes the AVPN and B fields of the first dword of a HPTE,
* for use when we want to match an existing PTE. The bottom 7 bits
* of the returned value are zero.
*/
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
int ssize)
{
unsigned long v;
/*
* The AVA field omits the low-order 23 bits of the 78-bit VA.
* These bits are not needed in the PTE, because the
* low-order b of these bits are part of the byte offset
* into the virtual page and, if b < 23, the high-order
* 23-b of these bits are always used in selecting the
* PTEGs to be searched.
*/
v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
v <<= HPTE_V_AVPN_SHIFT;
v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
return v;
}
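
Since vpn = va >> VPN_SHIFT, shifting the vpn right by a further
23 - VPN_SHIFT bits recovers va >> 23, so this yields the same AVPN the
old va-based code computed. A standalone sanity check (illustrative
values, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t va  = 0x3d0012345000ull;       /* arbitrary va */
            uint64_t vpn = va >> 12;                /* VPN_SHIFT */
            /* (va >> 12) >> (23 - 12) == va >> 23 */
            assert((vpn >> (23 - 12)) == (va >> 23));
            return 0;
    }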
/*
* This function sets the AVPN and L fields of the HPTE appropriately
* for the page size
*/
static inline unsigned long hpte_encode_v(unsigned long va, int psize,
int ssize)
static inline unsigned long hpte_encode_v(unsigned long vpn,
int psize, int ssize)
{
unsigned long v;
v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
v <<= HPTE_V_AVPN_SHIFT;
v = hpte_encode_avpn(vpn, psize, ssize);
if (psize != MMU_PAGE_4K)
v |= HPTE_V_LARGE;
v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
return v;
}
@ -216,30 +253,37 @@ static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
}
/*
* Build a VA given VSID, EA and segment size
* Build a vpn (the va shifted right by VPN_SHIFT bits) given VSID, EA and segment size.
*/
static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
int ssize)
static inline unsigned long hpt_vpn(unsigned long ea,
unsigned long vsid, int ssize)
{
if (ssize == MMU_SEGSIZE_256M)
return (vsid << 28) | (ea & 0xfffffffUL);
return (vsid << 40) | (ea & 0xffffffffffUL);
unsigned long mask;
int s_shift = segment_shift(ssize);
mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
/*
* This hashes a virtual address
*/
static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
int ssize)
static inline unsigned long hpt_hash(unsigned long vpn,
unsigned int shift, int ssize)
{
int mask;
unsigned long hash, vsid;
/* VPN_SHIFT can be at most 12 */
if (ssize == MMU_SEGSIZE_256M) {
hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
((vpn & mask) >> (shift - VPN_SHIFT));
} else {
vsid = va >> 40;
hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
hash = vsid ^ (vsid << 25) ^
((vpn & mask) >> (shift - VPN_SHIFT));
}
return hash & 0x7fffffffffUL;
}
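
A worked example of the new arithmetic (illustrative values; 256M
segment, so SID_SHIFT = 28, and 4K pages, so shift = 12):

    vsid = 0x123, segment offset of ea = 0x5000

    hpt_vpn:  (0x123 << (28 - 12)) | (0x5000 >> 12) = 0x1230005
    hpt_hash: mask = (1 << (28 - 12)) - 1 = 0xffff
              hash = (0x1230005 >> 16) ^ ((0x1230005 & 0xffff) >> 0)
                   = 0x123 ^ 0x5 = 0x126

which matches the old va-based formula (va >> 28) ^ ((va & 0xfffffff) >> 12)
for va = (0x123 << 28) | 0x5000.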

arch/powerpc/include/asm/pte-hash64-64k.h

@ -58,14 +58,16 @@
/* Trick: we set __end to va + 64k, which happens to work for
* a 16M page as well, since we want only one iteration
*/
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
do { \
unsigned long __end = va + PAGE_SIZE; \
unsigned __split = (psize == MMU_PAGE_4K || \
psize == MMU_PAGE_64K_AP); \
shift = mmu_psize_defs[psize].shift; \
for (index = 0; va < __end; index++, va += (1L << shift)) { \
if (!__split || __rpte_sub_valid(rpte, index)) do { \
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \
do { \
unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
unsigned __split = (psize == MMU_PAGE_4K || \
psize == MMU_PAGE_64K_AP); \
shift = mmu_psize_defs[psize].shift; \
for (index = 0; vpn < __end; index++, \
vpn += (1L << (shift - VPN_SHIFT))) { \
if (!__split || __rpte_sub_valid(rpte, index)) \
do {
#define pte_iterate_hashed_end() } while(0); } } while(0)
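
To see why setting __end one base page ahead still gives a single
iteration for a 16M page, here is the bounds arithmetic, assuming a 64K
base page (PAGE_SHIFT = 16) and VPN_SHIFT = 12:

    4K subpages: __end = vpn + 16, step = 1 << (12 - 12) = 1    -> 16 iterations
    16M page:    __end = vpn + 16, step = 1 << (24 - 12) = 4096 -> 1 iteration

The 16M step overshoots __end immediately, which is the trick the
comment above relies on.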

arch/powerpc/include/asm/tlbflush.h

@ -95,7 +95,7 @@ struct ppc64_tlb_batch {
unsigned long index;
struct mm_struct *mm;
real_pte_t pte[PPC64_TLB_BATCH_NR];
unsigned long vaddr[PPC64_TLB_BATCH_NR];
unsigned long vpn[PPC64_TLB_BATCH_NR];
unsigned int psize;
int ssize;
};
@ -127,7 +127,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
#define arch_flush_lazy_mmu_mode() do {} while (0)
extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);

arch/powerpc/kvm/book3s_32_mmu_host.c

@ -141,7 +141,7 @@ extern char etext[];
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
pfn_t hpaddr;
u64 va;
u64 vpn;
u64 vsid;
struct kvmppc_sid_map *map;
volatile u32 *pteg;
@ -173,7 +173,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
BUG_ON(!map);
vsid = map->host_vsid;
va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
next_pteg:
if (rr == 16) {
@ -244,11 +244,11 @@ next_pteg:
dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
orig_pte->may_write ? 'w' : '-',
orig_pte->may_execute ? 'x' : '-',
orig_pte->eaddr, (ulong)pteg, va,
orig_pte->eaddr, (ulong)pteg, vpn,
orig_pte->vpage, hpaddr);
pte->slot = (ulong)&pteg[rr];
pte->host_va = va;
pte->host_vpn = vpn;
pte->pte = *orig_pte;
pte->pfn = hpaddr >> PAGE_SHIFT;

arch/powerpc/kvm/book3s_64_mmu_host.c

@ -33,7 +33,7 @@
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
ppc_md.hpte_invalidate(pte->slot, pte->host_va,
ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
MMU_PAGE_4K, MMU_SEGSIZE_256M,
false);
}
@ -80,8 +80,9 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
unsigned long vpn;
pfn_t hpaddr;
ulong hash, hpteg, va;
ulong hash, hpteg;
u64 vsid;
int ret;
int rflags = 0x192;
@ -117,7 +118,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
}
vsid = map->host_vsid;
va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
if (!orig_pte->may_write)
rflags |= HPTE_R_PP;
@ -129,7 +130,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
else
kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
map_again:
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@ -141,7 +142,8 @@ map_again:
goto out;
}
ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
MMU_PAGE_4K, MMU_SEGSIZE_256M);
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
@ -152,7 +154,8 @@ map_again:
} else {
struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte);
trace_kvm_book3s_64_mmu_map(rflags, hpteg,
vpn, hpaddr, orig_pte);
/* The ppc_md code may give us a secondary entry even though we
asked for a primary. Fix up. */
@ -162,7 +165,7 @@ map_again:
}
pte->slot = hpteg + (ret & 7);
pte->host_va = va;
pte->host_vpn = vpn;
pte->pte = *orig_pte;
pte->pfn = hpaddr >> PAGE_SHIFT;

arch/powerpc/kvm/trace.h

@ -189,7 +189,7 @@ TRACE_EVENT(kvm_book3s_mmu_map,
TP_ARGS(pte),
TP_STRUCT__entry(
__field( u64, host_va )
__field( u64, host_vpn )
__field( u64, pfn )
__field( ulong, eaddr )
__field( u64, vpage )
@ -198,7 +198,7 @@ TRACE_EVENT(kvm_book3s_mmu_map,
),
TP_fast_assign(
__entry->host_va = pte->host_va;
__entry->host_vpn = pte->host_vpn;
__entry->pfn = pte->pfn;
__entry->eaddr = pte->pte.eaddr;
__entry->vpage = pte->pte.vpage;
@ -208,8 +208,8 @@ TRACE_EVENT(kvm_book3s_mmu_map,
(pte->pte.may_execute ? 0x1 : 0);
),
TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
__entry->host_va, __entry->pfn, __entry->eaddr,
TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
__entry->host_vpn, __entry->pfn, __entry->eaddr,
__entry->vpage, __entry->raddr, __entry->flags)
);
@ -218,7 +218,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
TP_ARGS(pte),
TP_STRUCT__entry(
__field( u64, host_va )
__field( u64, host_vpn )
__field( u64, pfn )
__field( ulong, eaddr )
__field( u64, vpage )
@ -227,7 +227,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
),
TP_fast_assign(
__entry->host_va = pte->host_va;
__entry->host_vpn = pte->host_vpn;
__entry->pfn = pte->pfn;
__entry->eaddr = pte->pte.eaddr;
__entry->vpage = pte->pte.vpage;
@ -238,7 +238,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
),
TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
__entry->host_va, __entry->pfn, __entry->eaddr,
__entry->host_vpn, __entry->pfn, __entry->eaddr,
__entry->vpage, __entry->raddr, __entry->flags)
);

arch/powerpc/mm/hash_low_64.S

@ -63,7 +63,7 @@ _GLOBAL(__hash_page_4K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
* r29 is "va"
* r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
*/
@ -111,10 +111,10 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Calc va and put it in r29 */
rldicr r29,r5,28,63-28
rldicl r3,r3,0,36
or r29,r3,r29
/* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@ -122,14 +122,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
3: /* Calc VA and hash in r29 and r28 for 1T segment */
sldi r29,r5,40 /* vsid << 40 */
clrldi r3,r3,24 /* ea & 0xffffffffff */
3: /* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
or r29,r28,r29
/*
* calculate hash value for primary slot and
* store it in r28 for 1T segment
*/
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
xor r28,r28,r5
or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@ -185,7 +190,7 @@ htab_insert_pte:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@ -208,7 +213,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@ -278,7 +283,7 @@ htab_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
mr r5,r29 /* va */
mr r5,r29 /* vpn */
li r6,MMU_PAGE_4K /* page size */
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@ -339,7 +344,7 @@ _GLOBAL(__hash_page_4K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
* r29 is "va"
* r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
* r26 is the hidx mask
@ -394,10 +399,14 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Calc va and put it in r29 */
rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */
rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */
or r29,r3,r29 /* r29 = va */
/* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT - VPN_SHIFT
/*
* clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
* srdi r28,r3,VPN_SHIFT
*/
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@ -405,14 +414,23 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
3: /* Calc VA and hash in r29 and r28 for 1T segment */
sldi r29,r5,40 /* vsid << 40 */
clrldi r3,r3,24 /* ea & 0xffffffffff */
3: /* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
/*
* clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
* srdi r28,r3,VPN_SHIFT
*/
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
or r29,r28,r29
/*
* Calculate hash value for primary slot and
* store it in r28 for 1T segment
*/
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
xor r28,r28,r5
or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@ -488,7 +506,7 @@ htab_special_pfn:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@ -515,7 +533,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@ -547,7 +565,7 @@ _GLOBAL(htab_call_hpte_remove)
* useless now that the segment has been switched to 4k pages.
*/
htab_inval_old_hpte:
mr r3,r29 /* virtual addr */
mr r3,r29 /* vpn */
mr r4,r31 /* PTE.pte */
li r5,0 /* PTE.hidx */
li r6,MMU_PAGE_64K /* psize */
@ -620,7 +638,7 @@ htab_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
mr r5,r29 /* va */
mr r5,r29 /* vpn */
li r6,MMU_PAGE_4K /* page size */
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@ -676,7 +694,7 @@ _GLOBAL(__hash_page_64K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
* r29 is "va"
* r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
*/
@ -729,10 +747,10 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Calc va and put it in r29 */
rldicr r29,r5,28,63-28
rldicl r3,r3,0,36
or r29,r3,r29
/* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@ -740,14 +758,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
3: /* Calc VA and hash in r29 and r28 for 1T segment */
sldi r29,r5,40 /* vsid << 40 */
clrldi r3,r3,24 /* ea & 0xffffffffff */
3: /* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
or r29,r28,r29
/*
* calculate hash value for primary slot and
* store it in r28 for 1T segment
*/
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
xor r28,r28,r5
or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@ -806,7 +829,7 @@ ht64_insert_pte:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARAM(R9)(r1) /* segment size */
@ -829,7 +852,7 @@ _GLOBAL(ht64_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARAM(R9)(r1) /* segment size */
@ -899,7 +922,7 @@ ht64_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
mr r5,r29 /* va */
mr r5,r29 /* vpn */
li r6,MMU_PAGE_64K
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
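
A C sketch of what the sldi/rldicl/or sequences above compute, with
r5 = vsid and r3 = ea (assumed constants: SID_SHIFT = 28,
SID_SHIFT_1T = 40, VPN_SHIFT = 12; the helper names are ours):

    #include <stdint.h>

    /* r29 = vpn for a 256M segment */
    static uint64_t calc_vpn_256m(uint64_t vsid, uint64_t ea)
    {
            /* sldi r29,r5,SID_SHIFT - VPN_SHIFT */
            uint64_t hi = vsid << (28 - 12);
            /* rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT):
             * rotate ea right by 12 and keep the low 16 bits,
             * i.e. (ea >> 12) & 0xffff
             */
            uint64_t lo = (ea >> 12) & ((1ull << (28 - 12)) - 1);
            return hi | lo;     /* or r29,r28,r29 */
    }

    /* r29 = vpn for a 1T segment: same shape, with SID_SHIFT_1T */
    static uint64_t calc_vpn_1t(uint64_t vsid, uint64_t ea)
    {
            return (vsid << (40 - 12)) |
                   ((ea >> 12) & ((1ull << (40 - 12)) - 1));
    }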

arch/powerpc/mm/hash_native_64.c

@ -39,22 +39,35 @@
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize)
static inline void __tlbie(unsigned long vpn, int psize, int ssize)
{
unsigned long va;
unsigned int penc;
/* clear top 16 bits, non SLS segment */
/*
* We need bits 14 to 65 of the va for a tlbie of a 4K page.
* With a vpn we already ignore the lower VPN_SHIFT bits, and
* the top two bits of the 78-bit va are ignored anyway, because
* we can only accommodate 76 bits in a 64-bit vpn with a
* VPN_SHIFT of 12.
*/
va = vpn << VPN_SHIFT;
/*
* Clear the top 16 bits of the 64-bit va, non-SLS segment.
* Older versions of the architecture (2.02 and earlier) require the
* masking of the top 16 bits.
*/
va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
va &= ~0xffful;
va |= ssize << 8;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
default:
/* We need 14 to 14 + i bits of va */
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
}
}
static inline void __tlbiel(unsigned long va, int psize, int ssize)
static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
{
unsigned long va;
unsigned int penc;
/* clear top 16 bits, non SLS segment */
/* VPN_SHIFT can be at most 12 */
va = vpn << VPN_SHIFT;
/*
* Clear the top 16 bits of the 64-bit va, non-SLS segment.
* Older versions of the architecture (2.02 and earlier) require the
* masking of the top 16 bits.
*/
va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
va &= ~0xffful;
va |= ssize << 8;
asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
: : "r"(va) : "memory");
break;
default:
/* We need 14 to 14 + i bits of va */
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
}
static inline void tlbie(unsigned long va, int psize, int ssize, int local)
static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
{
unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
__tlbiel(va, psize, ssize);
__tlbiel(vpn, psize, ssize);
asm volatile("ptesync": : :"memory");
} else {
__tlbie(va, psize, ssize);
__tlbie(vpn, psize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
clear_bit_unlock(HPTE_LOCK_BIT, word);
}
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize, int ssize)
{
@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
int i;
if (!(vflags & HPTE_V_BOLTED)) {
DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx,"
DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
" rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
hpte_group, vpn, pa, rflags, vflags, psize);
}
for (i = 0; i < HPTES_PER_GROUP; i++) {
@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
if (i == HPTES_PER_GROUP)
return -1;
hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
if (!(vflags & HPTE_V_BOLTED)) {
@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
}
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int psize, int ssize,
unsigned long vpn, int psize, int ssize,
int local)
{
struct hash_pte *hptep = htab_address + slot;
unsigned long hpte_v, want_v;
int ret = 0;
want_v = hpte_encode_v(va, psize, ssize);
want_v = hpte_encode_v(vpn, psize, ssize);
DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
va, want_v & HPTE_V_AVPN, slot, newpp);
DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
vpn, want_v & HPTE_V_AVPN, slot, newpp);
native_lock_hpte(hptep);
@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_unlock_hpte(hptep);
/* Ensure it is out of the tlb too. */
tlbie(va, psize, ssize, local);
tlbie(vpn, psize, ssize, local);
return ret;
}
static long native_hpte_find(unsigned long va, int psize, int ssize)
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
struct hash_pte *hptep;
unsigned long hash;
@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
long slot;
unsigned long want_v, hpte_v;
hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
want_v = hpte_encode_v(va, psize, ssize);
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
want_v = hpte_encode_v(vpn, psize, ssize);
/* Bolted mappings are only ever in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
int psize, int ssize)
{
unsigned long vsid, va;
unsigned long vpn;
unsigned long vsid;
long slot;
struct hash_pte *hptep;
vsid = get_kernel_vsid(ea, ssize);
va = hpt_va(ea, vsid, ssize);
vpn = hpt_vpn(ea, vsid, ssize);
slot = native_hpte_find(va, psize, ssize);
slot = native_hpte_find(vpn, psize, ssize);
if (slot == -1)
panic("could not find page to bolt\n");
hptep = htab_address + slot;
@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
(newpp & (HPTE_R_PP | HPTE_R_N));
/* Ensure it is out of the tlb too. */
tlbie(va, psize, ssize, 0);
tlbie(vpn, psize, ssize, 0);
}
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
struct hash_pte *hptep = htab_address + slot;
@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
local_irq_save(flags);
DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot);
DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
want_v = hpte_encode_v(va, psize, ssize);
want_v = hpte_encode_v(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
hptep->v = 0;
/* Invalidate the TLB */
tlbie(va, psize, ssize, local);
tlbie(vpn, psize, ssize, local);
local_irq_restore(flags);
}
@ -349,7 +370,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
int *psize, int *ssize, unsigned long *va)
int *psize, int *ssize, unsigned long *vpn)
{
unsigned long avpn, pteg, vpi;
unsigned long hpte_r = hpte->r;
@ -399,7 +420,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
vpi = (vsid ^ pteg) & htab_hash_mask;
seg_off |= vpi << shift;
}
*va = vsid << SID_SHIFT | seg_off;
*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
case MMU_SEGSIZE_1T:
/* We only have 40 - 23 bits of seg_off in avpn */
seg_off = (avpn & 0x1ffff) << 23;
@ -408,9 +429,9 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
seg_off |= vpi << shift;
}
*va = vsid << SID_SHIFT_1T | seg_off;
*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
default:
*va = size = 0;
*vpn = size = 0;
}
*psize = size;
}
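
A bit-accounting note on the decode above (a sketch, using the
constants already in this file): for a 1T segment, seg_off is 40 bits
wide, and the AVPN stores va >> 23, so only seg_off bits 23..39
(17 bits, hence the 0x1ffff mask) survive in the HPTE. Bits shift..22
are recovered from the hash instead: since
hash = vsid ^ (vsid << 25) ^ (seg_off >> shift), we get
seg_off >> shift = (vsid ^ (vsid << 25) ^ pteg) modulo the hash-table
size, which is exactly the vpi term. The 256M case works the same way
with seg_off limited to 28 bits.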
@ -425,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
*/
static void native_hpte_clear(void)
{
unsigned long vpn = 0;
unsigned long slot, slots, flags;
struct hash_pte *hptep = htab_address;
unsigned long hpte_v, va;
unsigned long hpte_v;
unsigned long pteg_count;
int psize, ssize;
@ -455,9 +477,9 @@ static void native_hpte_clear(void)
* already hold the native_tlbie_lock.
*/
if (hpte_v & HPTE_V_VALID) {
hpte_decode(hptep, slot, &psize, &ssize, &va);
hpte_decode(hptep, slot, &psize, &ssize, &vpn);
hptep->v = 0;
__tlbie(va, psize, ssize);
__tlbie(vpn, psize, ssize);
}
}
@ -472,7 +494,8 @@ static void native_hpte_clear(void)
*/
static void native_flush_hash_range(unsigned long number, int local)
{
unsigned long va, hash, index, hidx, shift, slot;
unsigned long vpn;
unsigned long hash, index, hidx, shift, slot;
struct hash_pte *hptep;
unsigned long hpte_v;
unsigned long want_v;
@ -486,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
local_irq_save(flags);
for (i = 0; i < number; i++) {
va = batch->vaddr[i];
vpn = batch->vpn[i];
pte = batch->pte[i];
pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
hash = hpt_hash(va, shift, ssize);
pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
hptep = htab_address + slot;
want_v = hpte_encode_v(va, psize, ssize);
want_v = hpte_encode_v(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@ -512,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
mmu_psize_defs[psize].tlbiel && local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
va = batch->vaddr[i];
vpn = batch->vpn[i];
pte = batch->pte[i];
pte_iterate_hashed_subpages(pte, psize, va, index,
shift) {
__tlbiel(va, psize, ssize);
pte_iterate_hashed_subpages(pte, psize,
vpn, index, shift) {
__tlbiel(vpn, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("ptesync":::"memory");
@ -529,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
va = batch->vaddr[i];
vpn = batch->vpn[i];
pte = batch->pte[i];
pte_iterate_hashed_subpages(pte, psize, va, index,
shift) {
__tlbie(va, psize, ssize);
pte_iterate_hashed_subpages(pte, psize,
vpn, index, shift) {
__tlbie(vpn, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("eieio; tlbsync; ptesync":::"memory");

arch/powerpc/mm/hash_utils_64.c

@ -191,18 +191,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
vaddr += step, paddr += step) {
unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr, ssize);
unsigned long va = hpt_va(vaddr, vsid, ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
/* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
hash = hpt_hash(va, shift, ssize);
hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
BUG_ON(!ppc_md.hpte_insert);
ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, ssize);
if (ret < 0)
@ -1152,21 +1152,21 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
* do not forget to update the assembly call site!
*/
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
int local)
{
unsigned long hash, index, shift, hidx, slot;
DBG_LOW("flush_hash_page(va=%016lx)\n", va);
pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
hash = hpt_hash(va, shift, ssize);
DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
} pte_iterate_hashed_end();
}
@ -1180,7 +1180,7 @@ void flush_hash_range(unsigned long number, int local)
&__get_cpu_var(ppc64_tlb_batch);
for (i = 0; i < number; i++)
flush_hash_page(batch->vaddr[i], batch->pte[i],
flush_hash_page(batch->vpn[i], batch->pte[i],
batch->psize, batch->ssize, local);
}
}
@ -1207,14 +1207,14 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
int ret;
hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
mode, HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
BUG_ON (ret < 0);
@ -1228,9 +1228,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
unsigned long hash, hidx, slot;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
spin_lock(&linear_map_hash_lock);
BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
hidx = linear_map_hash_slots[lmi] & 0x7f;
@ -1240,7 +1240,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
}
void kernel_map_pages(struct page *page, int numpages, int enable)

arch/powerpc/mm/hugetlbpage-hash64.c

@ -18,14 +18,15 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, int local, int ssize,
unsigned int shift, unsigned int mmu_psize)
{
unsigned long vpn;
unsigned long old_pte, new_pte;
unsigned long va, rflags, pa, sz;
unsigned long rflags, pa, sz;
long slot;
BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
/* Search the Linux page table for a match with va */
va = hpt_va(ea, vsid, ssize);
vpn = hpt_vpn(ea, vsid, ssize);
/* At this point, we have a pte (old_pte) which can be used to build
* or update an HPTE. There are 2 cases:
@ -69,19 +70,19 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
/* There MIGHT be an HPTE for this pte */
unsigned long hash, slot;
hash = hpt_hash(va, shift, ssize);
hash = hpt_hash(vpn, shift, ssize);
if (old_pte & _PAGE_F_SECOND)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & _PAGE_F_GIX) >> 12;
if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
ssize, local) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
if (likely(!(old_pte & _PAGE_HASHPTE))) {
unsigned long hash = hpt_hash(va, shift, ssize);
unsigned long hash = hpt_hash(vpn, shift, ssize);
unsigned long hpte_group;
pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@ -101,14 +102,14 @@ repeat:
_PAGE_COHERENT | _PAGE_GUARDED));
/* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
mmu_psize, ssize);
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
HPTE_V_SECONDARY,
mmu_psize, ssize);
if (slot == -1) {

arch/powerpc/mm/tlb_hash64.c

@ -42,8 +42,9 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge)
{
unsigned long vpn;
struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
unsigned long vsid, vaddr;
unsigned long vsid;
unsigned int psize;
int ssize;
real_pte_t rpte;
@ -86,7 +87,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
vaddr = hpt_va(addr, vsid, ssize);
vpn = hpt_vpn(addr, vsid, ssize);
rpte = __real_pte(__pte(pte), ptep);
/*
@ -96,7 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
* and decide to use local invalidates instead...
*/
if (!batch->active) {
flush_hash_page(vaddr, rpte, psize, ssize, 0);
flush_hash_page(vpn, rpte, psize, ssize, 0);
put_cpu_var(ppc64_tlb_batch);
return;
}
@ -122,7 +123,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
batch->ssize = ssize;
}
batch->pte[i] = rpte;
batch->vaddr[i] = vaddr;
batch->vpn[i] = vpn;
batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR)
__flush_tlb_pending(batch);
@ -146,7 +147,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
if (cpumask_equal(mm_cpumask(batch->mm), tmp))
local = 1;
if (i == 1)
flush_hash_page(batch->vaddr[0], batch->pte[0],
flush_hash_page(batch->vpn[0], batch->pte[0],
batch->psize, batch->ssize, local);
else
flush_hash_range(i, local);

arch/powerpc/platforms/cell/beat_htab.c

@ -88,7 +88,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
}
static long beat_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long pa,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
int psize, int ssize)
{
@ -103,7 +103,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
@ -184,14 +184,14 @@ static void beat_lpar_hptab_clear(void)
*/
static long beat_lpar_hpte_updatepp(unsigned long slot,
unsigned long newpp,
unsigned long va,
unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long lpar_rc;
u64 dummy0, dummy1;
unsigned long want_v;
want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
DBG_LOW(" update: "
"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@ -220,15 +220,15 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
return 0;
}
static long beat_lpar_hpte_find(unsigned long va, int psize)
static long beat_lpar_hpte_find(unsigned long vpn, int psize)
{
unsigned long hash;
unsigned long i, j;
long slot;
unsigned long want_v, hpte_v;
hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@ -255,14 +255,15 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
unsigned long ea,
int psize, int ssize)
{
unsigned long lpar_rc, slot, vsid, va;
unsigned long vpn;
unsigned long lpar_rc, slot, vsid;
u64 dummy0, dummy1;
vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
va = hpt_va(ea, vsid, MMU_SEGSIZE_256M);
vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
raw_spin_lock(&beat_htab_lock);
slot = beat_lpar_hpte_find(va, psize);
slot = beat_lpar_hpte_find(vpn, psize);
BUG_ON(slot == -1);
lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
@ -272,7 +273,7 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
BUG_ON(lpar_rc != 0);
}
static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long want_v;
@ -282,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
slot, va, psize, local);
want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
raw_spin_lock_irqsave(&beat_htab_lock, flags);
dummy1 = beat_lpar_hpte_getword0(slot);
@ -311,7 +312,7 @@ void __init hpte_init_beat(void)
}
static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
unsigned long va, unsigned long pa,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
int psize, int ssize)
{
@ -322,11 +323,11 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
return -1;
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
hpte_group, vpn, pa, rflags, vflags, psize);
hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
@ -364,14 +365,14 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
*/
static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
unsigned long newpp,
unsigned long va,
unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long lpar_rc;
unsigned long want_v;
unsigned long pss;
want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
DBG_LOW(" update: "
@ -392,16 +393,16 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
return 0;
}
static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va,
static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long want_v;
unsigned long lpar_rc;
unsigned long pss;
DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
slot, va, psize, local);
want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
slot, vpn, psize, local);
want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);

View File

@ -43,7 +43,7 @@ enum ps3_lpar_vas_id {
static DEFINE_SPINLOCK(ps3_htab_lock);
static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags, unsigned long vflags,
int psize, int ssize)
{
@ -61,7 +61,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
*/
vflags &= ~HPTE_V_SECONDARY;
hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
spin_lock_irqsave(&ps3_htab_lock, flags);
@ -75,8 +75,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
if (result) {
/* all entries bolted !*/
pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
__func__, result, va, pa, hpte_group, hpte_v, hpte_r);
pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
__func__, result, vpn, pa, hpte_group, hpte_v, hpte_r);
BUG();
}
@ -107,7 +107,7 @@ static long ps3_hpte_remove(unsigned long hpte_group)
}
static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int psize, int ssize, int local)
unsigned long vpn, int psize, int ssize, int local)
{
int result;
u64 hpte_v, want_v, hpte_rs;
@ -115,7 +115,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long flags;
long ret;
want_v = hpte_encode_v(va, psize, ssize);
want_v = hpte_encode_v(vpn, psize, ssize);
spin_lock_irqsave(&ps3_htab_lock, flags);
@ -125,8 +125,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
&hpte_rs);
if (result) {
pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n",
__func__, result, va, slot, psize);
pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n",
__func__, result, vpn, slot, psize);
BUG();
}
@ -159,7 +159,7 @@ static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
panic("ps3_hpte_updateboltedpp() not implemented");
}
static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long flags;
@ -170,8 +170,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
if (result) {
pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n",
__func__, result, va, slot, psize);
pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n",
__func__, result, vpn, slot, psize);
BUG();
}

View File

@ -107,9 +107,9 @@ void vpa_init(int cpu)
}
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long pa,
unsigned long rflags, unsigned long vflags,
int psize, int ssize)
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
int psize, int ssize)
{
unsigned long lpar_rc;
unsigned long flags;
@ -117,11 +117,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long hpte_v, hpte_r;
if (!(vflags & HPTE_V_BOLTED))
pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
"pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, vpn, pa, rflags, vflags, psize);
hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
@ -225,22 +225,6 @@ static void pSeries_lpar_hptab_clear(void)
}
}
/*
* This computes the AVPN and B fields of the first dword of a HPTE,
* for use when we want to match an existing PTE. The bottom 7 bits
* of the returned value are zero.
*/
static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
int ssize)
{
unsigned long v;
v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
v <<= HPTE_V_AVPN_SHIFT;
v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
return v;
}
/*
* NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
* the low 3 bits of flags happen to line up. So no transform is needed.
@ -249,14 +233,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
*/
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
unsigned long newpp,
unsigned long va,
unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long lpar_rc;
unsigned long flags = (newpp & 7) | H_AVPN;
unsigned long want_v;
want_v = hpte_encode_avpn(va, psize, ssize);
want_v = hpte_encode_avpn(vpn, psize, ssize);
pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
want_v, slot, flags, psize);
@ -294,15 +278,15 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
return dword0;
}
static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize)
static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
unsigned long hash;
unsigned long i;
long slot;
unsigned long want_v, hpte_v;
hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
want_v = hpte_encode_avpn(va, psize, ssize);
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
want_v = hpte_encode_avpn(vpn, psize, ssize);
/* Bolted entries are always in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@ -322,12 +306,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
unsigned long ea,
int psize, int ssize)
{
unsigned long lpar_rc, slot, vsid, va, flags;
unsigned long vpn;
unsigned long lpar_rc, slot, vsid, flags;
vsid = get_kernel_vsid(ea, ssize);
va = hpt_va(ea, vsid, ssize);
vpn = hpt_vpn(ea, vsid, ssize);
slot = pSeries_lpar_hpte_find(va, psize, ssize);
slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
BUG_ON(slot == -1);
flags = newpp & 7;
@ -336,17 +321,17 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
BUG_ON(lpar_rc != H_SUCCESS);
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long want_v;
unsigned long lpar_rc;
unsigned long dummy1, dummy2;
pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
slot, va, psize, local);
pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
slot, vpn, psize, local);
want_v = hpte_encode_avpn(va, psize, ssize);
want_v = hpte_encode_avpn(vpn, psize, ssize);
lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
if (lpar_rc == H_NOT_FOUND)
return;
@ -357,15 +342,16 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
static void pSeries_lpar_hpte_removebolted(unsigned long ea,
int psize, int ssize)
{
unsigned long slot, vsid, va;
unsigned long vpn;
unsigned long slot, vsid;
vsid = get_kernel_vsid(ea, ssize);
va = hpt_va(ea, vsid, ssize);
vpn = hpt_vpn(ea, vsid, ssize);
slot = pSeries_lpar_hpte_find(va, psize, ssize);
slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
BUG_ON(slot == -1);
pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0);
pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
}
/* Flag bits for H_BULK_REMOVE */
@ -381,12 +367,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea,
*/
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
unsigned long vpn;
unsigned long i, pix, rc;
unsigned long flags = 0;
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long param[9];
unsigned long va;
unsigned long hash, index, shift, hidx, slot;
real_pte_t pte;
int psize, ssize;
@ -398,21 +384,21 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
ssize = batch->ssize;
pix = 0;
for (i = 0; i < number; i++) {
va = batch->vaddr[i];
vpn = batch->vpn[i];
pte = batch->pte[i];
pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
hash = hpt_hash(va, shift, ssize);
pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
pSeries_lpar_hpte_invalidate(slot, va, psize,
pSeries_lpar_hpte_invalidate(slot, vpn, psize,
ssize, local);
} else {
param[pix] = HBR_REQUEST | HBR_AVPN | slot;
param[pix+1] = hpte_encode_avpn(va, psize,
param[pix+1] = hpte_encode_avpn(vpn, psize,
ssize);
pix += 2;
if (pix == 8) {