target/ppc: introduce vsr64_offset() to simplify get_cpu_vsr{l,h}() and set_cpu_vsr{l,h}()
Now that all VSX registers are stored in host endian order, there is no need to go via different accessors depending upon the register number. Instead we introduce vsr64_offset() and use it directly from within get_cpu_vsr{l,h}() and set_cpu_vsr{l,h}().

This also allows us to rewrite avr64_offset() and fpr_offset() in terms of the new vsr64_offset() function to more clearly express the relationship between the VSX, FPR and VMX registers, and also remove vsrl_offset() which is no longer required.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Message-Id: <20190307180520.13868-8-mark.cave-ayland@ilande.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
commit d59d1182b1
parent 8a14d31b00
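For context on the hunks below: the "high"/"low" selection works because the ppc_vsr_t element macros already hide host endianness, so VsrD(0) always names the architecturally-high doubleword of a VSR. A minimal sketch of the relevant definitions, paraphrased from QEMU's target/ppc/cpu.h of this period (field list abbreviated, not quoted verbatim):

    /* Sketch of the layout assumed by vsr64_offset(); paraphrased from
     * QEMU's target/ppc/cpu.h of this era, abbreviated rather than verbatim. */
    typedef union ppc_vsr_t {
        uint8_t  u8[16];
        uint64_t u64[2];
        int64_t  s64[2];
        /* ... other element-width views elided ... */
    } ppc_vsr_t;

    #if defined(HOST_WORDS_BIGENDIAN)
    #define VsrD(i)  u64[i]          /* high doubleword stored first */
    #define VsrSD(i) s64[i]
    #else
    #define VsrD(i)  u64[1 - (i)]    /* high doubleword stored second */
    #define VsrSD(i) s64[1 - (i)]
    #endif

Because VsrD(0) names the high doubleword whichever way the host stores it, vsr64_offset(i, high) in the patch can reduce to a single VsrD(high ? 0 : 1) lookup.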
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -2583,19 +2583,9 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
 #define VsrSD(i) s64[1 - (i)]
 #endif
 
-static inline int fpr_offset(int i)
+static inline int vsr64_offset(int i, bool high)
 {
-    return offsetof(CPUPPCState, vsr[i].VsrD(0));
-}
-
-static inline uint64_t *cpu_fpr_ptr(CPUPPCState *env, int i)
-{
-    return (uint64_t *)((uintptr_t)env + fpr_offset(i));
-}
-
-static inline int vsrl_offset(int i)
-{
-    return offsetof(CPUPPCState, vsr[i].VsrD(1));
+    return offsetof(CPUPPCState, vsr[i].VsrD(high ? 0 : 1));
 }
 
 static inline int vsr_full_offset(int i)
@@ -2603,14 +2593,24 @@ static inline int vsr_full_offset(int i)
     return offsetof(CPUPPCState, vsr[i].u64[0]);
 }
 
+static inline int fpr_offset(int i)
+{
+    return vsr64_offset(i, true);
+}
+
+static inline uint64_t *cpu_fpr_ptr(CPUPPCState *env, int i)
+{
+    return (uint64_t *)((uintptr_t)env + fpr_offset(i));
+}
+
 static inline uint64_t *cpu_vsrl_ptr(CPUPPCState *env, int i)
 {
-    return (uint64_t *)((uintptr_t)env + vsrl_offset(i));
+    return (uint64_t *)((uintptr_t)env + vsr64_offset(i, false));
 }
 
 static inline long avr64_offset(int i, bool high)
 {
-    return offsetof(CPUPPCState, vsr[32 + i].VsrD(high ? 0 : 1));
+    return vsr64_offset(i + 32, high);
 }
 
 static inline int avr_full_offset(int i)
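The register-file aliasing these helpers express can be demonstrated outside QEMU. The following is a hypothetical standalone demo, not QEMU code: CPUState is a stand-in for CPUPPCState with everything except the unified vsr[] array stripped out, and like QEMU it relies on the GCC/Clang extension allowing a variable array index inside offsetof():

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef union {
        uint64_t u64[2];
    } vsr_t;

    typedef struct {
        vsr_t vsr[64];  /* VSR 0..31 overlay the FPRs, VSR 32..63 the VMX regs */
    } CPUState;

    /* Little-endian-host flavour of VsrD(): doubleword 0 is stored second.
     * The demo is self-consistent either way; only the slot labelling differs. */
    #define VsrD(i) u64[1 - (i)]

    static inline int vsr64_offset(int i, bool high)
    {
        return offsetof(CPUState, vsr[i].VsrD(high ? 0 : 1));
    }

    static inline int fpr_offset(int i)
    {
        return vsr64_offset(i, true);        /* FPR i == high half of VSR i */
    }

    static inline long avr64_offset(int i, bool high)
    {
        return vsr64_offset(i + 32, high);   /* AVR i == VSR 32 + i */
    }

    int main(void)
    {
        CPUState env = { 0 };

        /* Write through the "FPR 3" alias; observe it in VSR 3's high half. */
        *(uint64_t *)((uintptr_t)&env + fpr_offset(3)) = 0x1122334455667788ULL;
        assert(env.vsr[3].VsrD(0) == 0x1122334455667788ULL);

        /* AVR 3 and VSR 35 name the same storage. */
        assert(avr64_offset(3, false) == vsr64_offset(35, false));
        return 0;
    }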
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -1,49 +1,23 @@
 /*** VSX extension ***/
 
-static inline void get_vsrl(TCGv_i64 dst, int n)
-{
-    tcg_gen_ld_i64(dst, cpu_env, vsrl_offset(n));
-}
-
-static inline void set_vsrl(int n, TCGv_i64 src)
-{
-    tcg_gen_st_i64(src, cpu_env, vsrl_offset(n));
-}
-
 static inline void get_cpu_vsrh(TCGv_i64 dst, int n)
 {
-    if (n < 32) {
-        get_fpr(dst, n);
-    } else {
-        get_avr64(dst, n - 32, true);
-    }
+    tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, true));
 }
 
 static inline void get_cpu_vsrl(TCGv_i64 dst, int n)
 {
-    if (n < 32) {
-        get_vsrl(dst, n);
-    } else {
-        get_avr64(dst, n - 32, false);
-    }
+    tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, false));
 }
 
 static inline void set_cpu_vsrh(int n, TCGv_i64 src)
 {
-    if (n < 32) {
-        set_fpr(n, src);
-    } else {
-        set_avr64(n - 32, src, true);
-    }
+    tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, true));
 }
 
 static inline void set_cpu_vsrl(int n, TCGv_i64 src)
 {
-    if (n < 32) {
-        set_vsrl(n, src);
-    } else {
-        set_avr64(n - 32, src, false);
-    }
+    tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, false));
 }
 
 #define VSX_LOAD_SCALAR(name, operation) \
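To see what the change buys at translation time, here is a hedged, illustrative sketch of a caller such as a VSX scalar load. It assumes QEMU's translator environment of this era (DisasContext, xT(), the TCG temp API) and omits the facility-enabled checks and effective-address computation that the real VSX_LOAD_SCALAR-generated functions perform; it is not the actual macro expansion:

    /* Hedged sketch only -- assumes QEMU translator context, not standalone. */
    static void gen_example_vsx_load(DisasContext *ctx)
    {
        TCGv_i64 t0 = tcg_temp_new_i64();

        /* ... effective-address computation and 64-bit guest load into t0 ... */

        /* Before this patch, set_cpu_vsrh() dispatched in the translator:
         * n < 32 went via set_fpr(), n >= 32 via set_avr64(). Now it is a
         * single tcg_gen_st_i64() at vsr64_offset(n, true) in either case. */
        set_cpu_vsrh(xT(ctx->opcode), t0);

        tcg_temp_free_i64(t0);
    }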