/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_DEFS_H
#define CPU_DEFS_H

#ifndef NEED_CPU_H
#error cpu.h included from common code
#endif

#include "config.h"
#include <inttypes.h>
#include "qemu/osdep.h"
#include "qemu/queue.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif

#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
#endif

#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif
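/*
 * Illustrative sketch, not part of the original header: the TARGET_FMT_*
 * macros are string literals spliced into printf-style format strings by
 * compile-time concatenation, so the same logging line compiles for both
 * 32-bit and 64-bit targets.  The helper name is hypothetical, and this
 * assumes <stdio.h> has already been pulled in by the includer.
 */
static inline void cpu_defs_example_print_vaddr(target_ulong addr)
{
    /* Expands to "%08x" on 32-bit targets, "%016" PRIx64 on 64-bit ones. */
    printf("vaddr=" TARGET_FMT_lx "\n", addr);
}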
#define EXCP_INTERRUPT  0x10000 /* async interruption */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
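/*
 * Hedged usage sketch (hypothetical helper, not part of the original
 * header): the EXCP_* codes are returned by the execution loop to report
 * why the CPU stopped; callers typically dispatch on them.
 */
static inline const char *cpu_defs_example_excp_name(int excp)
{
    switch (excp) {
    case EXCP_INTERRUPT: return "async interrupt";
    case EXCP_HLT:       return "hlt instruction";
    case EXCP_DEBUG:     return "breakpoint/singlestep stop";
    case EXCP_HALTED:    return "halted, waiting for external event";
    case EXCP_YIELD:     return "timeslice yield";
    default:             return "target-specific exception";
    }
}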
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page.  The top bits are the same.  This allows
   TLB invalidation to quickly clear a subset of the hash table.  */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
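/*
 * Hedged sketch of how these masks combine (the real hash function lives
 * outside this header; this hypothetical copy is for illustration only,
 * and TARGET_PAGE_BITS comes from the target's own headers).  The low
 * TB_JMP_ADDR_MASK bits are taken from the intra-page offset and the high
 * TB_JMP_PAGE_MASK bits from the page number, so all TBs of one page map
 * to one contiguous, easily-cleared slice of the jump cache.
 */
static inline unsigned int cpu_defs_example_tb_jmp_cache_hash(target_ulong pc)
{
    target_ulong tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return ((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
           | (tmp & TB_JMP_ADDR_MASK);
}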
#if !defined(CONFIG_USER_ONLY)
#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
/* use a fully associative victim TLB of 8 entries */
#define CPU_VTLB_SIZE 8

#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif

typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
                                    go directly to ram.
       bit 3                      : indicates that the entry is invalid
       bit 2..0                   : zero
    */
    target_ulong addr_read;
    target_ulong addr_write;
    target_ulong addr_code;
    /* Addend to virtual address to get host address.  IO accesses
       use the corresponding iotlb value.  */
    uintptr_t addend;
    /* padding to get a power of two size */
    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                  (sizeof(target_ulong) * 3 +
                   ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
                   sizeof(uintptr_t))];
} CPUTLBEntry;

QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
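/*
 * Hedged illustration (hypothetical helpers, not the real lookup code,
 * which lives in the softmmu sources): the main table is direct-mapped,
 * so a virtual address hashes straight to a single entry, while the
 * victim table is scanned linearly across all CPU_VTLB_SIZE ways.
 * TARGET_PAGE_BITS and TARGET_PAGE_MASK come from the target headers.
 */
static inline int cpu_defs_example_tlb_index(target_ulong vaddr)
{
    return (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}

static inline int cpu_defs_example_vtlb_lookup(const CPUTLBEntry *vtable,
                                               target_ulong vaddr)
{
    target_ulong tag = vaddr & TARGET_PAGE_MASK;
    int vidx;

    for (vidx = 0; vidx < CPU_VTLB_SIZE; vidx++) {
        if (vtable[vidx].addr_read == tag) {
            return vidx; /* real code would swap this entry into the main TLB */
        }
    }
    return -1; /* miss: fall back to a TLB refill / full page table walk */
}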
/* The IOTLB is not accessed directly inline by generated TCG code,
 * so the CPUIOTLBEntry layout is not as critical as that of the
 * CPUTLBEntry. (This is also why we don't want to combine the two
 * structs into one.)
 */
typedef struct CPUIOTLBEntry {
    hwaddr addr;
} CPUIOTLBEntry;
#define CPU_COMMON_TLB                                                  \
    /* The meaning of the MMU modes is defined in the target code. */   \
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
    CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
    CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                    \
    CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                 \
    target_ulong tlb_flush_addr;                                        \
    target_ulong tlb_flush_mask;                                        \
    target_ulong vtlb_index;
#else

#define CPU_COMMON_TLB

#endif

#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON                                                      \
    /* soft mmu support */                                              \
    CPU_COMMON_TLB
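/*
 * Usage sketch (hypothetical target, not from this header): each target
 * expands CPU_COMMON inside its CPUArchState definition so that common
 * softmmu code can reach the TLB fields at a known layout, e.g.:
 *
 *     typedef struct CPUMyState {
 *         target_ulong regs[16];
 *         CPU_COMMON
 *         // target-specific fields follow
 *     } CPUMyState;
 */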
#endif