/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "sysemu/sysemu.h"

/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
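
/*
 * Added note: together these mirror a minimal DWARF .debug_frame layout, a
 * CIE followed by one FDE covering the generated code.  tcg_register_jit_int()
 * (declared below) is assumed to use such a frame when building the in-memory
 * ELF image advertised to GDB's JIT interface (see the DEBUG_JIT note above).
 */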

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
#if TCG_TARGET_MAYBE_vec
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                           unsigned vece, const TCGArg *args,
                           const int *const_args);
#else
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                                  unsigned vece, const TCGArg *args,
                                  const int *const_args)
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static bool tcg_out_ldst_finalize(TCGContext *s);
#endif
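
/*
 * Added note: slack left at the end of each region.  tcg_region_assign()
 * below points code_gen_highwater this many bytes before the region's end,
 * so code generation can notice it is running out of room before it
 * actually overruns the buffer.
 */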
#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;
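
/*
 * TBs are tracked with one binary search tree (a GTree) per region rather
 * than a single global one, each protected by its own lock, so TCG threads
 * working on different regions do not contend with one another.
 */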
struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
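
/*
 * Layout illustration (added; not from the original source): regions are
 * carved out of code_gen_buffer starting at start_aligned, region.stride
 * bytes apart (region.size of usable space plus a guard).  The first region
 * may begin earlier, at region.start, and the last one ends at region.end;
 * tcg_region_bounds() below handles both special cases.
 */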

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
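
/*
 * Emit/patch helpers, one pair per operand width.  When the width matches
 * TCG_TARGET_INSN_UNIT_SIZE the value is stored as a single insn unit;
 * otherwise it is memcpy'd and (for the tcg_outN variants) code_ptr is
 * advanced by the number of insn units the value occupies.
 */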

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        bool ok = patch_reloc(code_ptr, type, l->u.value, addend);
        tcg_debug_assert(ok);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        bool ok = patch_reloc(r->ptr, r->type, value, r->addend);
        tcg_debug_assert(ok);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}
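
/*
 * Added note: remember the current output offset for direct-jump slot @which;
 * it is used later when the chained jump needs to be reset.  The assert
 * catches truncation in case the stored field is narrower than size_t.
 */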
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    size_t off = tcg_current_code_size(s);
    s->tb_jmp_reset_offset[which] = off;
    /* Make sure that we didn't overflow the stored offset.  */
    assert(s->tb_jmp_reset_offset[which] == off);
}

#include "tcg-target.inc.c"

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * For lookups, one of the two .size fields (the key's) is set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
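
/*
 * Added note: tcg_tb_lookup() below relies on the comparator above by passing
 * a key with .size == 0, e.g. (struct tb_tc){ .ptr = tc_ptr }, so a lookup
 * matches any TB whose [ptr, ptr + size) range contains tc_ptr.
 */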

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
{
    size_t region_idx;

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
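
/*
 * Worked example (added; the numbers are illustrative only): with
 * region.stride == 16 MB and region.n == 8, a pointer 40 MB past
 * start_aligned maps to region_idx 40 / 16 == 2; anything beyond
 * stride * (n - 1) is clamped to the last region, whose size may differ.
 */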

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}
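
/*
 * Added note: only the first and last regions may deviate from region.size
 * (they absorb the unaligned head and tail of the buffer), which is why
 * tc_ptr_to_region_tree() above clamps the computed index.
 */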

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}
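
/*
 * Usage sketch (added; the actual call sites are outside this excerpt and
 * the exact call pattern is assumed): a translator that crosses
 * s->code_gen_highwater is expected to do something like
 *
 *     if (tcg_region_alloc(s)) {
 *         ... every region is in use: the whole code buffer must be
 *             flushed before translating more ...
 *     } else {
 *         ... retry the allocation in the freshly assigned region ...
 *     }
 */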

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}
/* Call from a safe-work context */
|
|
|
|
void tcg_region_reset_all(void)
|
|
|
|
{
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 06:57:58 +08:00
|
|
|
unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 07:24:20 +08:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
qemu_mutex_lock(®ion.lock);
|
|
|
|
region.current = 0;
|
|
|
|
region.agg_size_full = 0;
|
|
|
|
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 06:57:58 +08:00
|
|
|
for (i = 0; i < n_ctxs; i++) {
|
|
|
|
TCGContext *s = atomic_read(&tcg_ctxs[i]);
|
|
|
|
bool err = tcg_region_initial_alloc__locked(s);
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 07:24:20 +08:00
|
|
|
|
|
|
|
g_assert(!err);
|
|
|
|
}
|
|
|
|
qemu_mutex_unlock(®ion.lock);
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-27 04:58:05 +08:00
|
|
|
|
|
|
|
tcg_region_tree_reset_all();
|
}

#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
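
/*
 * Worked example of the sizing heuristic above, using the '-smp 8 -tb-size 80'
 * setup quoted in the commit history (illustrative only; the exact outcome
 * depends on the configured buffer size): with roughly 80 MB of
 * code_gen_buffer and max_cpus = 8, the loop settles on 4 regions per thread,
 * i.e. 32 regions of about 2.5 MB each. With a much smaller buffer, no value
 * of i yields a region of at least 2 MB, so we fall back to one region per
 * vCPU thread (max_cpus regions).
 */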

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    tcg_region_trees_init();

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
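
/*
 * Rough sketch of the layout set up above (illustrative; the first region is
 * 'aligned - buf' bytes larger than the others, and the last region absorbs
 * any leftover pages before region.end):
 *
 *   buf   aligned                                               region.end
 *    |     |<-------- stride -------->|<-------- stride -------->|
 *    [.....|   region 0 code  | guard |   region 1 code  | guard |...| guard ]
 *          |<----- size ----->|       |<----- size ----->|
 *
 * Each region's trailing page is made inaccessible with qemu_mprotect_none(),
 * so a TCG thread that runs past the end of its region faults immediately
 * instead of overwriting a neighbouring region's code.
 */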

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
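
/*
 * A minimal sketch of the expected call order for a softmmu TCG thread.
 * The function name below is hypothetical and the block is not compiled;
 * the real call sites live with the vCPU thread code, not in this file.
 */
#if 0
static void *example_mttcg_cpu_thread_fn(void *arg)
{
    /* Claim a TCGContext and an initial region before any translation. */
    tcg_register_thread();

    /* ... per-vCPU execution loop, eventually reaching tb_gen_code() ... */
    return NULL;
}
#endif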

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
|
|
|
|
size_t tcg_code_size(void)
|
|
|
|
{
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 06:57:58 +08:00
|
|
|
unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 07:24:20 +08:00
|
|
|
unsigned int i;
|
|
|
|
size_t total;
|
|
|
|
|
|
|
|
qemu_mutex_lock(®ion.lock);
|
|
|
|
total = region.agg_size_full;
|
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}
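
/*
 * Added explanation (not from the upstream file): the summation above holds
 * region.lock so that no context can retire a region while its bytes are
 * being counted; each context's code_gen_ptr is still read with
 * atomic_read() because it is advanced outside the lock (see tcg_tb_alloc
 * below), so the result is a consistent snapshot rather than an exact count.
 */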

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
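
/*
 * Worked example (illustrative figures, not from the source): the capacity is
 * the span from region.start to region.end (plus the trailing guard area)
 * minus, per region, the guard area and the TCG_HIGHWATER reserve:
 *
 *     capacity = (end + guard - start) - n * (guard + TCG_HIGHWATER)
 *
 * A caller could combine it with tcg_code_size() above to report a
 * utilization ratio, e.g. size/capacity, which is what the flush statistics
 * in the commit history were derived from.
 */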

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);

        total += atomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
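
/*
 * Usage sketch (wrapper assumed to live in tcg.h, not in this file): most
 * callers go through tcg_malloc(), which hands out memory from the current
 * pool chunk and only falls back to tcg_malloc_internal() when the chunk is
 * exhausted, e.g.:
 *
 *     TCGArgConstraint *ct = tcg_malloc(sizeof(*ct) * n);
 *
 * Memory obtained this way is never freed individually; it stays valid until
 * the next tcg_pool_reset() below.
 */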

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;

    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
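
/*
 * Added note: all_helpers[] is populated by the DEF_HELPER_* expansions from
 * "exec/helper-tcg.h" (an assumption based on the include above).
 * tcg_context_init() below inserts every entry into helper_table keyed by
 * the helper's function pointer, which is how tcg_gen_callN() later recovers
 * the call flags and sizemask:
 *
 *     info = g_hash_table_lookup(helper_table, (gpointer)func);
 */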

void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation for tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
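
/*
 * Added note: the retry label covers the case where the aligned TB header
 * would cross code_gen_highwater.  tcg_region_alloc() then switches this
 * context to a fresh region and the allocation is attempted once more; a
 * NULL return tells the caller that every region is exhausted, which
 * typically triggers a full TB flush (an assumption about the call site,
 * which lives outside this file).
 */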

void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        bool ok = tcg_out_pool_finalize(s);
        tcg_debug_assert(ok);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }

        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}
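
/*
 * Added note: once the prologue has been emitted, code_gen_buffer,
 * code_gen_buffer_size and code_buf are rewritten to describe only the
 * space behind it, so later users of the buffer (region setup, TB
 * allocation, per-TB statistics) never see the prologue bytes themselves.
 */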

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
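
/*
 * Usage sketch (hypothetical target and field names): targets create their
 * fixed globals through the tcg_global_mem_new_* wrappers, which resolve to
 * this function with cpu_env as the base, e.g.:
 *
 *     cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPUXYZState, pc), "pc");
 *
 * On 32-bit hosts a 64-bit global is transparently split into the "_0"/"_1"
 * halves built above.
 */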

TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}
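
/*
 * Usage sketch (typed wrappers assumed from tcg.h): frontends do not call
 * tcg_temp_new_internal() directly; they use the typed helpers, e.g.:
 *
 *     TCGv_i32 t = tcg_temp_new_i32();        -- plain temp, dead at branches
 *     TCGv_i32 l = tcg_temp_local_new_i32();  -- local temp, live across labels
 *
 * both of which funnel into this function with the matching type and
 * temp_local flag.
 */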

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
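
/*
 * Added note: the tcg_const_* helpers above are convenience wrappers that
 * allocate a fresh temporary and emit a movi into it; the caller still owns
 * the temp and is expected to release it with the matching tcg_temp_free_*
 * helper once the value is no longer needed (free wrappers assumed to be
 * declared in tcg.h).
 */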

#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
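
/*
 * Usage sketch (illustrative, not a call site taken from this file):
 * expansion code can query tcg_op_supported() to choose between a single
 * optional opcode and a fallback sequence, roughly:
 *
 *     if (tcg_op_supported(INDEX_op_ctpop_i32)) {
 *         tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg);
 *     } else {
 *         gen_ctpop_fallback(ret, arg);   -- hypothetical helper
 *     }
 */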
/* Note: we convert the 64 bit args to 32 bit and do some alignment
|
|
|
|
and endian swap. Maybe it would be better to do the alignment
|
|
|
|
and endian swap in tcg_reg_alloc_call(). */
|
2017-10-16 04:27:56 +08:00
|
|
|
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
|
2008-02-01 18:05:41 +08:00
|
|
|
{
|
2016-12-09 02:52:57 +08:00
|
|
|
int i, real_args, nb_rets, pi;
|
2014-04-08 23:39:43 +08:00
|
|
|
unsigned sizemask, flags;
|
2014-04-08 06:10:05 +08:00
|
|
|
TCGHelperInfo *info;
|
2016-12-09 02:52:57 +08:00
|
|
|
TCGOp *op;
|
2014-04-08 06:10:05 +08:00
|
|
|
|
2017-07-06 06:41:23 +08:00
|
|
|
info = g_hash_table_lookup(helper_table, (gpointer)func);
|
2014-04-08 23:39:43 +08:00
|
|
|
flags = info->flags;
|
|
|
|
sizemask = info->sizemask;
|
2010-06-15 08:35:27 +08:00
|
|
|
|
2014-03-05 05:39:48 +08:00
|
|
|
#if defined(__sparc__) && !defined(__arch64__) \
|
|
|
|
&& !defined(CONFIG_TCG_INTERPRETER)
|
|
|
|
/* We have 64-bit values in one register, but need to pass as two
|
|
|
|
separate parameters. Split them. */
|
|
|
|
int orig_sizemask = sizemask;
|
|
|
|
int orig_nargs = nargs;
|
|
|
|
TCGv_i64 retl, reth;
|
2017-10-16 04:27:56 +08:00
|
|
|
TCGTemp *split_args[MAX_OPC_PARAM];
|
2014-03-05 05:39:48 +08:00
|
|
|
|
2017-11-02 19:47:37 +08:00
|
|
|
retl = NULL;
|
|
|
|
reth = NULL;
|
2014-03-05 05:39:48 +08:00
|
|
|
if (sizemask != 0) {
|
|
|
|
for (i = real_args = 0; i < nargs; ++i) {
|
|
|
|
int is_64bit = sizemask & (1 << (i+1)*2);
|
|
|
|
if (is_64bit) {
|
2017-10-20 15:05:45 +08:00
|
|
|
TCGv_i64 orig = temp_tcgv_i64(args[i]);
|
2014-03-05 05:39:48 +08:00
|
|
|
TCGv_i32 h = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 l = tcg_temp_new_i32();
|
|
|
|
tcg_gen_extr_i64_i32(l, h, orig);
|
2017-10-16 04:27:56 +08:00
|
|
|
split_args[real_args++] = tcgv_i32_temp(h);
|
|
|
|
split_args[real_args++] = tcgv_i32_temp(l);
|
2014-03-05 05:39:48 +08:00
|
|
|
} else {
|
|
|
|
split_args[real_args++] = args[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
nargs = real_args;
|
|
|
|
args = split_args;
|
|
|
|
sizemask = 0;
|
|
|
|
}
|
|
|
|
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
|
2010-06-15 08:35:27 +08:00
|
|
|
for (i = 0; i < nargs; ++i) {
|
|
|
|
int is_64bit = sizemask & (1 << (i+1)*2);
|
|
|
|
int is_signed = sizemask & (2 << (i+1)*2);
|
|
|
|
if (!is_64bit) {
|
|
|
|
TCGv_i64 temp = tcg_temp_new_i64();
|
2017-10-20 15:05:45 +08:00
|
|
|
TCGv_i64 orig = temp_tcgv_i64(args[i]);
|
2010-06-15 08:35:27 +08:00
|
|
|
if (is_signed) {
|
|
|
|
tcg_gen_ext32s_i64(temp, orig);
|
|
|
|
} else {
|
|
|
|
tcg_gen_ext32u_i64(temp, orig);
|
|
|
|
}
|
2017-10-16 04:27:56 +08:00
|
|
|
args[i] = tcgv_i64_temp(temp);
|
2010-06-15 08:35:27 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* TCG_TARGET_EXTEND_ARGS */
|
|
|
|
|
2017-11-02 22:19:14 +08:00
|
|
|
    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
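
/*
 * Sketch of the argument layout built above in op->args[]: the output
 * (return) temporaries come first, then the input arguments (with
 * TCG_CALL_DUMMY_ARG padding where the target requires aligned 64-bit
 * pairs), then the raw function pointer, and finally the call flags.
 * TCGOP_CALLO/TCGOP_CALLI record how many outputs/inputs were emitted.
 */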

static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;
    TCGTemp *ts;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
    }
    for (n = s->nb_temps; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    if (ts->temp_global) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}

/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}

static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
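
/*
 * The tables above feed tcg_dump_ops() below: a qemu_ld/st operand is
 * printed as alignment prefix + size/endianness + mmu index.  To give
 * a hypothetical example, an unaligned little-endian 32-bit load in
 * mmu index 1 would appear as ",leul,1" and an alignment-required
 * variant as ",al+leul,1".
 */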

void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }
        if (op->life) {
            unsigned life = op->life;

            for (; col < 48; ++col) {
                putc(' ', qemu_logfile);
            }

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }
        qemu_log("\n");
    }
}
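
/*
 * For reference, a single line produced by the loop above looks
 * roughly like (hypothetical temporaries and life info):
 *     add_i32 tmp3,tmp1,tmp2                        dead: 1 2
 * i.e. the opcode name, comma-separated outputs/inputs/constants,
 * then the sync/dead annotations derived from op->life.
 */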

/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;
    int i, n;

    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG)) {
            return 0;
        }
        n = 0;
        for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i)) {
                n++;
            }
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}

/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for (i = 0; i < n; i++) {
        def->sorted_args[start + i] = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}

static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
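
/*
 * As parsed above, a constraint string applies per argument: a leading
 * digit aliases an input to that output register, '&' requests a fresh
 * output register, 'i' accepts a constant, and anything else is passed
 * to the backend's target_parse_constraint().  As an illustrative,
 * target-dependent example, { "r", "r", "ri" } would describe a
 * three-operand op whose last input may be a register or an immediate.
 */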

void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}

static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    s->nb_ops++;

    return op;
}

TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}

#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
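
/*
 * Reading of the per-temp state bits used by the liveness passes:
 * TS_DEAD means the value is not needed by any later op, TS_MEM means
 * the canonical copy currently lives in memory; TS_DEAD | TS_MEM thus
 * describes a global that is dead in registers but saved to its slot.
 * The per-argument DEAD_ARG/SYNC_ARG bits recorded in op->life are
 * what IS_DEAD_ARG()/NEED_SYNC_ARG() test during register allocation.
 */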

/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void tcg_la_func_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void tcg_la_bb_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = (s->temps[i].temp_local
                             ? TS_DEAD | TS_MEM
                             : TS_DEAD);
    }
}

/* Liveness analysis: update the opc_arg_life array to tell whether a
   given input argument is dead.  Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    TCGOp *op, *op_prev;

    tcg_la_func_end(s);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) {
        int i, nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *arg_ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                } else {
                do_not_remove_call:

                    /* output args are dead */
                    for (i = 0; i < nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts->state & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                        if (arg_ts->state & TS_MEM) {
                            arg_life |= SYNC_ARG << i;
                        }
                        arg_ts->state = TS_DEAD;
                    }

                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                        TCG_CALL_NO_READ_GLOBALS))) {
                        /* globals should go back to memory */
                        for (i = 0; i < nb_globals; i++) {
                            s->temps[i].state = TS_DEAD | TS_MEM;
                        }
                    } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                        /* globals should be synced to memory */
                        for (i = 0; i < nb_globals; i++) {
                            s->temps[i].state |= TS_MEM;
                        }
                    }

                    /* record arguments that die in this helper */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts && arg_ts->state & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                    }
                    /* input arguments are live for preceding opcodes */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts) {
                            arg_ts->state &= ~TS_DEAD;
                        }
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            arg_temp(op->args[0])->state = TS_DEAD;
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guests when the
               cpu mode is set to 32 bit.  */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
            do_remove:
                tcg_op_remove(s, op);
            } else {
            do_not_remove:
                /* output args are dead */
                for (i = 0; i < nb_oargs; i++) {
                    arg_ts = arg_temp(op->args[i]);
                    if (arg_ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (arg_ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    arg_ts->state = TS_DEAD;
                }

                /* if end of basic block, update */
                if (def->flags & TCG_OPF_BB_END) {
                    tcg_la_bb_end(s);
                } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                    /* globals should be synced to memory */
                    for (i = 0; i < nb_globals; i++) {
                        s->temps[i].state |= TS_MEM;
                    }
                }

                /* record arguments that die in this opcode */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg_ts = arg_temp(op->args[i]);
                    if (arg_ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }
                /* input arguments are live for preceding opcodes */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg_temp(op->args[i])->state &= ~TS_DEAD;
                }
            }
            break;
        }
        op->life = arg_life;
    }
}
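
/*
 * Note on the pass above: it walks the op list backwards, so a value
 * is "dead" at an op when no later op reads it.  The DEAD_ARG/SYNC_ARG
 * bits accumulated in op->life are consumed by liveness_pass_2 and by
 * the register allocator (via IS_DEAD_ARG/NEED_SYNC_ARG) below.
 */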

/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global. */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        for (i = 0; i < nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (!dir_ts) {
                continue;
            }
            op->args[i] = temp_arg(dir_ts);
            changes = true;

            /* The output is now live and modified.  */
            arg_ts->state = 0;

            /* Sync outputs upon their last write.  */
            if (NEED_SYNC_ARG(i)) {
                TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                sop->args[0] = temp_arg(dir_ts);
                sop->args[1] = temp_arg(arg_ts->mem_base);
                sop->args[2] = arg_ts->mem_offset;

                arg_ts->state = TS_MEM;
            }
            /* Drop outputs that are dead.  */
            if (IS_DEAD_ARG(i)) {
                arg_ts->state = TS_DEAD;
            }
        }
    }

    return changes;
}
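
/*
 * In effect, the pass above rewrites references to an "indirect"
 * global (as I read it, one whose base pointer does not itself live
 * permanently in a host register) into references to a shadow
 * temporary, inserting an explicit ld before the first use and an st
 * after the last write, guided by the op->life bits from pass 1.
 */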

#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for (i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch (ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}

static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif

static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}

static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);

/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = (free_or_dead < 0
                    || ts->temp_local
                    || ts->temp_global
                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}

/* Sync a temporary to memory.  'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts,
                      TCGRegSet allocated_regs, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}

/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, -1);
    }
}

/* Allocate a register from desired_regs & ~allocated_regs */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
                            TCGRegSet allocated_regs, bool rev)
{
    int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    const int *order;
    TCGReg reg;
    TCGRegSet reg_ct;

    reg_ct = desired_regs & ~allocated_regs;
    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* first try free registers */
    for (i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL) {
            return reg;
        }
    }

    /* XXX: do better spill choice */
    for (i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg)) {
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        }
    }

    tcg_abort();
}
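
/*
 * Allocation policy, as implemented above: scan the target's preferred
 * allocation order (or the indirect-base order when 'rev' is set) for
 * a register that is both acceptable and currently free; only if none
 * is free, spill the first acceptable register via tcg_reg_free() and
 * reuse it.  The spill choice is deliberately simple (see the XXX).
 */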

/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}

/* Save a temporary to memory.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory.  Keep a tcg_debug_assert for safety.  */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}

/* save globals to their canonical location and assume they can be
   modified by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}

/* sync globals to their canonical location and assume they can be
   read by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->fixed_reg
                         || ts->mem_coherent);
    }
}

/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
        }
    }

    save_globals(s, allocated_regs);
}

static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life)
{
    if (ots->fixed_reg) {
        /* For fixed registers, we do not do any constant propagation.  */
        tcg_out_movi(s, ots->type, ots->reg, val);
        return;
    }

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
{
    TCGTemp *ots = arg_temp(op->args[0]);
    tcg_target_ulong val = op->args[1];

    tcg_reg_alloc_do_movi(s, ots, val, op->life);
}

static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0);
        }
    }
}
|
|
|
|
|
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        i = def->sorted_args[nb_oargs + k];
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            goto iarg_end;
        }

        temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);

        if (arg_ct->ct & TCG_CT_IALIAS) {
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != op->args[arg_ct->alias_index])
                    goto allocate_in_reg;
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }

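                /* Example: for sub2 t1, t2, t3, t3, t4, t5 where t3 is dead
                   after the op, both aliased inputs share one register; the
                   check below keeps that register from being assigned to
                   both outputs. */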
                /* check if the current register has already been allocated
                   for another input aliased to an output */
                int k2, i2;
                for (k2 = 0 ; k2 < k ; k2++) {
                    i2 = def->sorted_args[nb_oargs + k2];
                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                        (new_args[i2] == ts->reg)) {
                        goto allocate_in_reg;
                    }
                }
            }
        }
        reg = ts->reg;
        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
                                ts->indirect_base);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    iarg_end: ;
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);
            if ((arg_ct->ct & TCG_CT_ALIAS)
                && !const_args[arg_ct->alias_index]) {
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->ct & TCG_CT_NEWREG) {
                reg = tcg_reg_alloc(s, arg_ct->u.regs,
                                    i_allocated_regs | o_allocated_regs,
                                    ts->indirect_base);
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
                                    ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            /* if a fixed register is used, then a move will be done afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG) {
                    s->reg_to_temp[ts->reg] = NULL;
                }
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                /* temp value is modified, so the value kept in memory is
                   potentially not the same */
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = ts;
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}

#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif

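/* Register allocation for a call op.  The op's arguments are laid out as
   nb_oargs output temps, then nb_iargs input temps, followed by the function
   pointer and the call flags.  Inputs that do not fit in the target's
   argument registers are stored to the static call-args area on the stack. */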
static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
    flags = op->args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];
            tcg_reg_free(s, reg, allocated_regs);

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else {
                TCGRegSet arg_set = 0;

                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);
        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);

        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            if (NEED_SYNC_ARG(i)) {
                temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
            } else if (IS_DEAD_ARG(i)) {
                temp_dead(s, ts);
            }
        }
    }
}

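/* TCG profiling support.  Each TCGContext keeps its own TCGProfile counters;
   a snapshot sums them over all contexts.  The totals are normally reported
   through the monitor dump commands below (e.g. "info jit" and "info opcount"
   end up in tcg_dump_info and tcg_dump_op_count). */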
#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                       \
    do {                                                \
        (to)->field += atomic_read(&((from)->field));   \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = atomic_read(&((from)->field));    \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)

/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, cpu_exec_time);
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}

int64_t tcg_cpu_exec_time(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    int64_t ret = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *prof = &s->prof;

        ret += atomic_read(&prof->cpu_exec_time);
    }
    return ret;
}
#else
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}

int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
#endif

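/* Translate the pending ops of @s into host code for @tb: run the optimizer
   and the liveness passes, then allocate registers and emit host code one op
   at a time.  Returns the number of bytes generated, or -1 if the output
   buffer overflowed and the caller needs to retry with more room. */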
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        atomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            atomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        atomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            atomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    atomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s);
            qemu_log("\n");
            qemu_log_unlock();
        }
#endif
        /* Replace indirect temps with direct temps.  */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness.  */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc.ptr;
    s->code_ptr = tb->tc.ptr;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
        case INDEX_op_dupi_vec:
            tcg_reg_alloc_movi(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset.  */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    if (!tcg_out_ldst_finalize(s)) {
        return -1;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    if (!tcg_out_pool_finalize(s)) {
        return -1;
    }
#endif

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}

#ifdef CONFIG_PROFILER
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    cpu_fprintf(f, "avg host code/TB %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    cpu_fprintf(f, "avg search data/TB %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    cpu_fprintf(f, "cycles/op %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    cpu_fprintf(f, "cycles/search byte %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    cpu_fprintf(f, " gen_interm time %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, " gen_code time %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, " avg cycles %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface.  */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}

static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

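/*
 * Typical backend usage of the interface above (a sketch, not code from this
 * file): a tcg target that defines ELF_HOST_MACHINE provides its own
 * tcg_register_jit(), builds a target-specific .debug_frame image, and hands
 * it to tcg_register_jit_int(), roughly:
 *
 *     void tcg_register_jit(void *buf, size_t buf_size)
 *     {
 *         tcg_register_jit_int(buf, buf_size,
 *                              &debug_frame, sizeof(debug_frame));
 *     }
 *
 * where debug_frame is the backend's statically built unwind information.
 */
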
#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif