Improvements for TARGET_PAGE_BITS_VARY
Fix for TCI ld16u_i64.
Fix for segv on icount execute from i/o memory.
Two misc cleanups.

-----BEGIN PGP SIGNATURE-----

iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAl23AUwdHHJpY2hhcmQu
aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/nLwgAgp6uXYfxu5L5gyza
tvAPieqn/Gyn9weNuJE6ZIgti9xmhVIz8V/EH5VabXBpwaWTs8ttDinjv05BgSUm
p6Y0Zdbv6/oERA5KNtVuPMZXUpAyFL8w4bqtkwnlp7+0t7v1AtuNH09WqZ7B/VYG
0zMPic8n/gi9zRCnSBgToRXJrK9FBmVeJFjJEG42rLV7GBGBaTllFcWzT+EyoknO
OigvZb6ZkNP+cAPZY4ELj2qrR+ziVMxyAMHxGQ4QiCx6QMAys+/Mf44Is8425h1/
GRKAEtfdUgptPMWLTQkivaH9l8pe71oj44NvCCSvfkPfynXAEyYsW+epCDWAmG71
sHbuFQ==
=wSJn
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20191028' into staging

Improvements for TARGET_PAGE_BITS_VARY
Fix for TCI ld16u_i64.
Fix for segv on icount execute from i/o memory.
Two misc cleanups.

# gpg: Signature made Mon 28 Oct 2019 14:55:08 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20191028:
  translate-all: Remove tb_alloc
  translate-all: fix uninitialized tb->orig_tb
  cputlb: Fix tlb_vaddr_to_host
  exec: Cache TARGET_PAGE_MASK for TARGET_PAGE_BITS_VARY
  exec: Promote TARGET_PAGE_MASK to target_long
  exec: Restrict TARGET_PAGE_BITS_VARY assert to CONFIG_DEBUG_TCG
  exec: Use const alias for TARGET_PAGE_BITS_VARY
  configure: Detect compiler support for __attribute__((alias))
  exec: Split out variable page size support to exec-vary.c
  cpu: use ROUND_UP() to define xxx_PAGE_ALIGN
  cputlb: ensure _cmmu helper functions follow the naming standard
  tci: Add implementation for INDEX_op_ld16u_i64

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 8c68ff250a

Makefile.target

@@ -107,7 +107,7 @@ obj-y += trace/
 
 #########################################################
 # cpu emulator library
-obj-y += exec.o
+obj-y += exec.o exec-vary.o
 obj-y += accel/
 obj-$(CONFIG_TCG) += tcg/tcg.o tcg/tcg-op.o tcg/tcg-op-vec.o tcg/tcg-op-gvec.o
 obj-$(CONFIG_TCG) += tcg/tcg-common.o tcg/optimize.o

accel/tcg/cputlb.c

@@ -1189,7 +1189,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
 {
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    uintptr_t tlb_addr, page;
+    target_ulong tlb_addr, page;
     size_t elt_ofs;
 
     switch (access_type) {
@@ -1862,12 +1862,18 @@ static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
 }
 
-uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
     return full_ldub_cmmu(env, addr, oi, retaddr);
 }
 
+int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
+}
+
 static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -1875,12 +1881,18 @@ static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                        full_le_lduw_cmmu);
 }
 
-uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
     return full_le_lduw_cmmu(env, addr, oi, retaddr);
 }
 
+int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
+}
+
 static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -1888,12 +1900,18 @@ static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                        full_be_lduw_cmmu);
 }
 
-uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
     return full_be_lduw_cmmu(env, addr, oi, retaddr);
 }
 
+int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
+}
+
 static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, uintptr_t retaddr)
 {

accel/tcg/translate-all.c

@@ -1156,23 +1156,6 @@ void tcg_exec_init(unsigned long tb_size)
 #endif
 }
 
-/*
- * Allocate a new translation block. Flush the translation buffer if
- * too many translation blocks or too much generated code.
- */
-static TranslationBlock *tb_alloc(target_ulong pc)
-{
-    TranslationBlock *tb;
-
-    assert_memory_lock();
-
-    tb = tcg_tb_alloc(tcg_ctx);
-    if (unlikely(tb == NULL)) {
-        return NULL;
-    }
-    return tb;
-}
-
 /* call with @p->lock held */
 static inline void invalidate_page_bitmap(PageDesc *p)
 {
@@ -1681,6 +1664,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     TCGProfile *prof = &tcg_ctx->prof;
     int64_t ti;
 #endif
 
+    assert_memory_lock();
 
     phys_pc = get_page_addr_code(env, pc);
@@ -1706,7 +1690,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     }
 
 buffer_overflow:
-    tb = tb_alloc(pc);
+    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
         /* flush must be done */
         tb_flush(cpu);
@@ -1722,6 +1706,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->cs_base = cs_base;
     tb->flags = flags;
     tb->cflags = cflags;
+    tb->orig_tb = NULL;
     tb->trace_vcpu_dstate = *cpu->trace_dstate;
     tcg_ctx->tb_cflags = cflags;
 tb_overflow:

configure (19 changed lines)

@@ -5518,6 +5518,21 @@ if compile_prog "" "" ; then
   vector16=yes
 fi
 
+########################################
+# See if __attribute__((alias)) is supported.
+# This false for Xcode 9, but has been remedied for Xcode 10.
+# Unfortunately, travis uses Xcode 9 by default.
+
+attralias=no
+cat > $TMPC << EOF
+int x = 1;
+extern const int y __attribute__((alias("x")));
+int main(void) { return 0; }
+EOF
+if compile_prog "" "" ; then
+  attralias=yes
+fi
+
 ########################################
 # check if getauxval is available.
 
@@ -7083,6 +7098,10 @@ if test "$vector16" = "yes" ; then
   echo "CONFIG_VECTOR16=y" >> $config_host_mak
 fi
 
+if test "$attralias" = "yes" ; then
+  echo "CONFIG_ATTRIBUTE_ALIAS=y" >> $config_host_mak
+fi
+
 if test "$getauxval" = "yes" ; then
   echo "CONFIG_GETAUXVAL=y" >> $config_host_mak
 fi

exec-vary.c (new file, 108 lines)

@@ -0,0 +1,108 @@
+/*
+ * Variable page size handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+
+#define IN_EXEC_VARY 1
+
+#include "exec/exec-all.h"
+
+#ifdef TARGET_PAGE_BITS_VARY
+# ifdef CONFIG_ATTRIBUTE_ALIAS
+/*
+ * We want to declare the "target_page" variable as const, which tells
+ * the compiler that it can cache any value that it reads across calls.
+ * This avoids multiple assertions and multiple reads within any one user.
+ *
+ * This works because we finish initializing the data before we ever read
+ * from the "target_page" symbol.
+ *
+ * This also requires that we have a non-constant symbol by which we can
+ * perform the actual initialization, and which forces the data to be
+ * allocated within writable memory.  Thus "init_target_page", and we use
+ * that symbol exclusively in the two functions that initialize this value.
+ *
+ * The "target_page" symbol is created as an alias of "init_target_page".
+ */
+static TargetPageBits init_target_page;
+
+/*
+ * Note that this is *not* a redundant decl, this is the definition of
+ * the "target_page" symbol.  The syntax for this definition requires
+ * the use of the extern keyword.  This seems to be a GCC bug in
+ * either the syntax for the alias attribute or in -Wredundant-decls.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91765
+ */
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wredundant-decls"
+
+extern const TargetPageBits target_page
+    __attribute__((alias("init_target_page")));
+
+# pragma GCC diagnostic pop
+# else
+/*
+ * When aliases are not supported then we force two different declarations,
+ * by way of suppressing the header declaration with IN_EXEC_VARY.
+ * We assume that on such an old compiler, LTO cannot be used, and so the
+ * compiler cannot not detect the mismatched declarations, and all is well.
+ */
+TargetPageBits target_page;
+# define init_target_page target_page
+# endif
+#endif
+
+bool set_preferred_target_page_bits(int bits)
+{
+    /*
+     * The target page size is the lowest common denominator for all
+     * the CPUs in the system, so we can only make it smaller, never
+     * larger. And we can't make it smaller once we've committed to
+     * a particular size.
+     */
+#ifdef TARGET_PAGE_BITS_VARY
+    assert(bits >= TARGET_PAGE_BITS_MIN);
+    if (init_target_page.bits == 0 || init_target_page.bits > bits) {
+        if (init_target_page.decided) {
+            return false;
+        }
+        init_target_page.bits = bits;
+    }
+#endif
+    return true;
+}
+
+void finalize_target_page_bits(void)
+{
+#ifdef TARGET_PAGE_BITS_VARY
+    if (init_target_page.bits == 0) {
+        init_target_page.bits = TARGET_PAGE_BITS_MIN;
+    }
+    init_target_page.mask = (target_long)-1 << init_target_page.bits;
+    init_target_page.decided = true;
+
+    /*
+     * For the benefit of an -flto build, prevent the compiler from
+     * hoisting a read from target_page before we finish initializing.
+     */
+    barrier();
+#endif
+}
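
For readers outside the tree, a minimal standalone sketch of the const-alias trick that the comments in exec-vary.c describe. The names here (PageInfo, init_page, page, finalize_page) are illustrative only, not QEMU symbols, and the sketch assumes a compiler that accepts __attribute__((alias)) on variables (GCC, or clang from Xcode 10 onward per the configure check above):

/* Sketch only: mirrors the structure of exec-vary.c, not taken from it. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool decided;
    int bits;
} PageInfo;

/* Writable storage, touched only during startup. */
static PageInfo init_page;

/* Read-only alias of the same storage: readers see a const object,
   so the compiler may cache values loaded from it across calls. */
extern const PageInfo page __attribute__((alias("init_page")));

static void finalize_page(int bits)
{
    init_page.bits = bits;
    init_page.decided = true;
    /* Same role as QEMU's barrier(): keep reads of "page" from being
       hoisted above the initialization. */
    __asm__ __volatile__("" ::: "memory");
}

int main(void)
{
    finalize_page(12);                      /* e.g. 4 KiB pages */
    assert(page.decided);
    printf("page bits = %d\n", page.bits);  /* prints 12 */
    return 0;
}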

exec.c (34 changed lines)

@@ -91,11 +91,6 @@ AddressSpace address_space_memory;
 static MemoryRegion io_mem_unassigned;
 #endif
 
-#ifdef TARGET_PAGE_BITS_VARY
-int target_page_bits;
-bool target_page_bits_decided;
-#endif
-
 CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
 
 /* current CPU in the current thread. It is only valid inside
@@ -109,37 +104,8 @@ int use_icount;
 uintptr_t qemu_host_page_size;
 intptr_t qemu_host_page_mask;
 
-bool set_preferred_target_page_bits(int bits)
-{
-    /* The target page size is the lowest common denominator for all
-     * the CPUs in the system, so we can only make it smaller, never
-     * larger. And we can't make it smaller once we've committed to
-     * a particular size.
-     */
-#ifdef TARGET_PAGE_BITS_VARY
-    assert(bits >= TARGET_PAGE_BITS_MIN);
-    if (target_page_bits == 0 || target_page_bits > bits) {
-        if (target_page_bits_decided) {
-            return false;
-        }
-        target_page_bits = bits;
-    }
-#endif
-    return true;
-}
-
 #if !defined(CONFIG_USER_ONLY)
 
-static void finalize_target_page_bits(void)
-{
-#ifdef TARGET_PAGE_BITS_VARY
-    if (target_page_bits == 0) {
-        target_page_bits = TARGET_PAGE_BITS_MIN;
-    }
-    target_page_bits_decided = true;
-#endif
-}
-
 typedef struct PhysPageEntry PhysPageEntry;
 
 struct PhysPageEntry {

include/exec/cpu-all.h

@@ -210,17 +210,31 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
 /* page related stuff */
 
 #ifdef TARGET_PAGE_BITS_VARY
-extern bool target_page_bits_decided;
-extern int target_page_bits;
-#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \
-                            target_page_bits; })
+typedef struct {
+    bool decided;
+    int bits;
+    target_long mask;
+} TargetPageBits;
+#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
+extern const TargetPageBits target_page;
+#else
+extern TargetPageBits target_page;
+#endif
+#ifdef CONFIG_DEBUG_TCG
+#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
+#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; })
+#else
+#define TARGET_PAGE_BITS target_page.bits
+#define TARGET_PAGE_MASK target_page.mask
+#endif
+#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
 #else
 #define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
 #endif
 
-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
-#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
 
 /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
  * when intptr_t is 32-bit and we are aligning a long long.
@@ -228,9 +242,8 @@ extern int target_page_bits;
 extern uintptr_t qemu_host_page_size;
 extern intptr_t qemu_host_page_mask;
 
-#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
-#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
-                                    qemu_real_host_page_mask)
+#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
 
 /* same as PROT_xxx */
 #define PAGE_READ 0x0001
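
The xxx_PAGE_ALIGN rewrite above leans on the usual power-of-two alignment identity: for a power-of-two size, adding size - 1 and masking with ~(size - 1) rounds up exactly like plain arithmetic. A quick standalone check (illustrative C, not code from the tree):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old-style mask round-up; only valid when size is a power of two. */
static uint64_t align_mask(uint64_t addr, uint64_t size)
{
    return (addr + size - 1) & ~(size - 1);
}

/* Arithmetic round-up; valid for any non-zero size. */
static uint64_t align_div(uint64_t addr, uint64_t size)
{
    return ((addr + size - 1) / size) * size;
}

int main(void)
{
    const uint64_t page = 4096;                 /* example: 4 KiB pages */
    for (uint64_t addr = 0; addr < 4 * page; addr += 37) {
        assert(align_mask(addr, page) == align_div(addr, page));
    }
    printf("mask and arithmetic round-up agree for 4 KiB pages\n");
    return 0;
}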

include/exec/cpu_ldst_template.h

@@ -65,8 +65,8 @@
 #ifdef SOFTMMU_CODE_ACCESS
 #define ADDR_READ addr_code
 #define MMUSUFFIX _cmmu
-#define URETSUFFIX SUFFIX
-#define SRETSUFFIX SUFFIX
+#define URETSUFFIX USUFFIX
+#define SRETSUFFIX glue(s, SUFFIX)
 #else
 #define ADDR_READ addr_read
 #define MMUSUFFIX _mmu

include/qemu-common.h

@@ -74,6 +74,12 @@ void cpu_exec_step_atomic(CPUState *cpu);
  */
 bool set_preferred_target_page_bits(int bits);
 
+/**
+ * finalize_target_page_bits:
+ * Commit the final value set by set_preferred_target_page_bits.
+ */
+void finalize_target_page_bits(void);
+
 /**
  * Sends a (part of) iovec down a socket, yielding when the socket is full, or
  * Receives data into a (part of) iovec from a socket,

target/cris/translate_v10.inc.c

@@ -1202,8 +1202,7 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
     case CRISV10_IND_BCC_M:
 
         cris_cc_mask(dc, 0);
-        imm = cpu_ldsw_code(env, dc->pc + 2);
-        simm = (int16_t)imm;
+        simm = cpu_ldsw_code(env, dc->pc + 2);
         simm += 4;
 
         LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm);

tcg/tcg.h (20 changed lines)

@@ -1269,16 +1269,22 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
 
-uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr);
-uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr);
+int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
 uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr);
 uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr);
-uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
+                             TCGMemOpIdx oi, uintptr_t retaddr);
+int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
 uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr);
 uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
@@ -1295,7 +1301,8 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
 # define helper_ret_stw_mmu helper_be_stw_mmu
 # define helper_ret_stl_mmu helper_be_stl_mmu
 # define helper_ret_stq_mmu helper_be_stq_mmu
-# define helper_ret_ldw_cmmu helper_be_ldw_cmmu
+# define helper_ret_lduw_cmmu helper_be_lduw_cmmu
+# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu
 # define helper_ret_ldl_cmmu helper_be_ldl_cmmu
 # define helper_ret_ldq_cmmu helper_be_ldq_cmmu
 #else
@@ -1308,7 +1315,8 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
 # define helper_ret_stw_mmu helper_le_stw_mmu
 # define helper_ret_stl_mmu helper_le_stl_mmu
 # define helper_ret_stq_mmu helper_le_stq_mmu
-# define helper_ret_ldw_cmmu helper_le_ldw_cmmu
+# define helper_ret_lduw_cmmu helper_le_lduw_cmmu
+# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu
 # define helper_ret_ldl_cmmu helper_le_ldl_cmmu
 # define helper_ret_ldq_cmmu helper_le_ldq_cmmu
 #endif

tcg/tci.c (15 changed lines)

@@ -127,6 +127,12 @@ static void tci_write_reg8(tcg_target_ulong *regs, TCGReg index, uint8_t value)
     tci_write_reg(regs, index, value);
 }
 
+static void
+tci_write_reg16(tcg_target_ulong *regs, TCGReg index, uint16_t value)
+{
+    tci_write_reg(regs, index, value);
+}
+
 static void
 tci_write_reg32(tcg_target_ulong *regs, TCGReg index, uint32_t value)
 {
@@ -585,6 +591,8 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
         tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
         break;
     case INDEX_op_ld8s_i32:
+        TODO();
+        break;
     case INDEX_op_ld16u_i32:
         TODO();
         break;
@@ -854,7 +862,14 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
         tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
         break;
     case INDEX_op_ld8s_i64:
+        TODO();
+        break;
     case INDEX_op_ld16u_i64:
+        t0 = *tb_ptr++;
+        t1 = tci_read_r(regs, &tb_ptr);
+        t2 = tci_read_s32(&tb_ptr);
+        tci_write_reg16(regs, t0, *(uint16_t *)(t1 + t2));
+        break;
     case INDEX_op_ld16s_i64:
         TODO();
         break;