target/arm: Add gen_mte_checkN

Replace existing uses of clean_data_tbi in translate-a64.c that
perform multiple logical memory accesses.  Leave the helper blank
for now to reduce the patch size.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Richard Henderson, 2020-06-25 20:31:22 -07:00; committed by Peter Maydell
commit 73ceeb0011, parent 0a405be2b8
4 changed files with 66 additions and 16 deletions
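
At the translator level the conversion is a one-for-one substitution at each
affected call site. The disas_ldst_pair() hunk below, for instance, changes the
address computation as follows (excerpted from the diff; repeated here only to
summarize the pattern):

    /* before: only strip the tag bits from the address */
    clean_addr = clean_data_tbi(s, dirty_addr);

    /* after: also report the element size and the total span of the
     * operation (an LDP/STP touches 2 << size bytes), so MTE can
     * eventually check the whole pair rather than a single access */
    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag,
                                size, 2 << size);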


@@ -105,6 +105,7 @@ DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
 DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
 DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
+DEF_HELPER_FLAGS_3(mte_checkN, TCG_CALL_NO_WG, i64, env, i32, i64)
 DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
 DEF_HELPER_FLAGS_3(ldg, TCG_CALL_NO_WG, i64, env, i64, i64)
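
For reference, and not part of the patch: by QEMU's usual helper machinery, the
DEF_HELPER_FLAGS_3() line added above declares a run-time helper with the C
prototype below. The desc argument carries the translation-time MTEDESC fields
that gen_mte_checkN() packs in the translate-a64.c hunk further down, and
TCG_CALL_NO_WG records that the helper does not write TCG globals.

    uint64_t helper_mte_checkN(CPUARMState *env, uint32_t desc, uint64_t ptr);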


@@ -366,3 +366,11 @@ uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
     return ptr;
 }
+
+/*
+ * Perform an MTE checked access for multiple logical accesses.
+ */
+uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    return ptr;
+}
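
The stub keeps behaviour unchanged for now: the value it returns is what the
translated code uses as the address to access. The desc argument already fixes
the interface, though: the translator packs small translation-time constants
into one 32-bit word for the helper to unpack at run time. A stand-alone sketch
of that round trip, with an invented field layout (QEMU's real layout is the
MTEDESC field definition used by gen_mte_checkN() below):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout, for illustration only:
     * [3:0] mmu index, [4] is-write, [14:5] total bytes. */
    static uint32_t pack_desc(unsigned midx, unsigned is_write, unsigned tsize)
    {
        return (midx & 0xf) | ((is_write & 1u) << 4) | ((tsize & 0x3ffu) << 5);
    }

    int main(void)
    {
        uint32_t desc = pack_desc(1, 1, 16);  /* e.g. an STP of two X registers */

        printf("midx=%u write=%u tsize=%u\n",
               desc & 0xf, (desc >> 4) & 1, (desc >> 5) & 0x3ff);
        return 0;
    }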


@@ -284,6 +284,34 @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                                  false, get_mem_index(s));
 }
 
+/*
+ * For MTE, check multiple logical sequential accesses.
+ */
+TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
+                        bool tag_checked, int log2_esize, int total_size)
+{
+    if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) {
+        TCGv_i32 tcg_desc;
+        TCGv_i64 ret;
+        int desc = 0;
+
+        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize);
+        desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size);
+        tcg_desc = tcg_const_i32(desc);
+
+        ret = new_tmp_a64(s);
+        gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr);
+        tcg_temp_free_i32(tcg_desc);
+
+        return ret;
+    }
+    return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize);
+}
+
 typedef struct DisasCompare64 {
     TCGCond cond;
     TCGv_i64 value;
@@ -2848,7 +2876,10 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
         }
     }
 
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
+                                (wback || rn != 31) && !set_tag,
+                                size, 2 << size);
+
     if (is_vector) {
         if (is_load) {
             do_fp_ld(s, rt, clean_addr, size);
@@ -3514,7 +3545,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
     MemOp endian = s->be_data;
 
-    int ebytes;     /* bytes per element */
+    int total;      /* total bytes */
     int elements;   /* elements per vector */
     int rpt;        /* num iterations */
     int selem;      /* structure elements */
@@ -3584,19 +3615,26 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
         endian = MO_LE;
     }
 
-    /* Consecutive little-endian elements from a single register
+    total = rpt * selem * (is_q ? 16 : 8);
+    tcg_rn = cpu_reg_sp(s, rn);
+
+    /*
+     * Issue the MTE check vs the logical repeat count, before we
+     * promote consecutive little-endian elements below.
+     */
+    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
+                                size, total);
+
+    /*
+     * Consecutive little-endian elements from a single register
      * can be promoted to a larger little-endian operation.
      */
     if (selem == 1 && endian == MO_LE) {
         size = 3;
     }
-    ebytes = 1 << size;
-    elements = (is_q ? 16 : 8) / ebytes;
-
-    tcg_rn = cpu_reg_sp(s, rn);
-    clean_addr = clean_data_tbi(s, tcg_rn);
-    tcg_ebytes = tcg_const_i64(ebytes);
+    elements = (is_q ? 16 : 8) >> size;
+    tcg_ebytes = tcg_const_i64(1 << size);
 
     for (r = 0; r < rpt; r++) {
         int e;
         for (e = 0; e < elements; e++) {
@@ -3630,7 +3668,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
 
     if (is_postidx) {
         if (rm == 31) {
-            tcg_gen_addi_i64(tcg_rn, tcg_rn, rpt * elements * selem * ebytes);
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
         } else {
             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
         }
@@ -3676,7 +3714,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
     bool replicate = false;
     int index = is_q << 3 | S << 2 | size;
-    int ebytes, xs;
+    int xs, total;
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
 
     if (extract32(insn, 31, 1)) {
@@ -3730,16 +3768,17 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         return;
     }
 
-    ebytes = 1 << scale;
-
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
 
+    total = selem << scale;
     tcg_rn = cpu_reg_sp(s, rn);
-    clean_addr = clean_data_tbi(s, tcg_rn);
-    tcg_ebytes = tcg_const_i64(ebytes);
+    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
+                                scale, total);
+    tcg_ebytes = tcg_const_i64(1 << scale);
 
     for (xs = 0; xs < selem; xs++) {
         if (replicate) {
             /* Load and replicate to all elements */
@@ -3766,7 +3805,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
 
     if (is_postidx) {
         if (rm == 31) {
-            tcg_gen_addi_i64(tcg_rn, tcg_rn, selem * ebytes);
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
         } else {
             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
         }
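
To make the size arguments at the three converted call sites concrete, here is
a small stand-alone check of the totals they pass and of when gen_mte_checkN()
takes the multi-access helper path rather than deferring to gen_mte_check1().
The formulas are the ones used in the hunks above; the example operand sizes
are invented:

    #include <stdio.h>

    int main(void)
    {
        /* disas_ldst_pair: LDP/STP of two X registers, size = 3 (8-byte elements). */
        int pair_size = 3;
        int pair_total = 2 << pair_size;                      /* 16 bytes */

        /* disas_ldst_multiple_struct: e.g. LD4 of 64-bit elements, Q form:
         * rpt = 1, selem = 4, size = 3. */
        int rpt = 1, selem = 4, is_q = 1, mult_size = 3;
        int mult_total = rpt * selem * (is_q ? 16 : 8);       /* 64 bytes */

        /* disas_ldst_single_struct: e.g. ST2 of 32-bit elements,
         * scale = 2, selem = 2. */
        int single_selem = 2, scale = 2;
        int single_total = single_selem << scale;             /* 8 bytes */

        /* The helper path is taken only when the total differs from one
         * element; otherwise gen_mte_check1() handles the access. */
        printf("pair:   total=%2d multi=%d\n", pair_total, pair_total != (1 << pair_size));
        printf("struct: total=%2d multi=%d\n", mult_total, mult_total != (1 << mult_size));
        printf("single: total=%2d multi=%d\n", single_total, single_total != (1 << scale));
        return 0;
    }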


@@ -42,6 +42,8 @@ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
 bool sve_access_check(DisasContext *s);
 TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                         bool tag_checked, int log2_size);
+TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
+                        bool tag_checked, int count, int log2_esize);
 
 /* We should have at some point before trying to access an FP register
  * done the necessary access check, so assert that