tcg/mips: Support unaligned access for softmmu

We can use the routines just added for user-only to emit
unaligned accesses in softmmu mode too.
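
For illustration only (not part of the patch), here is a minimal C
sketch of the test the softmmu fast path now performs, assuming 4 KiB
pages and a 64-bit address. The helper name tlb_fast_path_hit is made
up for this sketch; the patch emits the equivalent MIPS instructions
in tcg_out_tlb_load.

    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_PAGE_MASK (~(uint64_t)0xfff)   /* assume 4 KiB pages */

    /* The fast path may be used when an access of (1 << s_bits) bytes,
       required to be (1 << a_bits)-aligned, matches the TLB comparator
       without crossing into the next page. */
    static bool tlb_fast_path_hit(uint64_t addr, uint64_t comparator,
                                  unsigned a_bits, unsigned s_bits)
    {
        uint64_t a_mask = (1u << a_bits) - 1;
        uint64_t s_mask = (1u << s_bits) - 1;
        uint64_t tlb_mask = TARGET_PAGE_MASK | a_mask;
        uint64_t x = addr;

        if (a_mask < s_mask) {
            /* Unaligned access: compare against the last byte of the
               access, so one that crosses a page boundary mismatches
               and falls through to the slow path. */
            x += s_mask - a_mask;
        }
        return (x & tlb_mask) == comparator;
    }

For example, a 4-byte load at 0x1ffe with no alignment requirement is
compared as 0x2001, whose page bits (0x2000) cannot match a comparator
for page 0x1000, so the access is routed to the slow path rather than
reading across the boundary.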

Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org> 2021-08-06 06:55:14 -10:00
parent 23a79c113e
commit d9e5283465


@@ -1134,8 +1134,10 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
tcg_insn_unit *label_ptr[2], bool is_load)
{
MemOp opc = get_memop(oi);
-unsigned s_bits = opc & MO_SIZE;
unsigned a_bits = get_alignment_bits(opc);
+unsigned s_bits = opc & MO_SIZE;
+unsigned a_mask = (1 << a_bits) - 1;
+unsigned s_mask = (1 << s_bits) - 1;
int mem_index = get_mmuidx(oi);
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
@@ -1143,7 +1145,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
int add_off = offsetof(CPUTLBEntry, addend);
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
: offsetof(CPUTLBEntry, addr_write));
-target_ulong mask;
+target_ulong tlb_mask;
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
@@ -1157,27 +1159,13 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
/* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */
tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
-/* We don't currently support unaligned accesses.
-We could do so with mips32r6. */
-if (a_bits < s_bits) {
-a_bits = s_bits;
-}
-/* Mask the page bits, keeping the alignment bits to compare against. */
-mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
/* Load the (low-half) tlb comparator. */
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
-tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask);
+tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
} else {
tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD
: TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
TCG_TMP0, TCG_TMP3, cmp_off);
-tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask);
-/* No second compare is required here;
-load the tlb addend for the fast path. */
-tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
}
/* Zero extend a 32-bit guest address for a 64-bit host. */
@@ -1185,7 +1173,25 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
tcg_out_ext32u(s, base, addrl);
addrl = base;
}
-tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
+/*
+* Mask the page bits, keeping the alignment bits to compare against.
+* For unaligned accesses, compare against the end of the access to
+* verify that it does not cross a page boundary.
+*/
+tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
+tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, tlb_mask);
+if (a_mask >= s_mask) {
+tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
+} else {
+tcg_out_opc_imm(s, ALIAS_PADDI, TCG_TMP2, addrl, s_mask - a_mask);
+tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
+}
+if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+/* Load the tlb addend for the fast path. */
+tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
+}
label_ptr[0] = s->code_ptr;
tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
@@ -1193,7 +1199,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
/* Load and test the high half tlb comparator. */
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
/* delay slot */
-tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
+tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
/* Load the tlb addend for the fast path. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
@@ -1515,8 +1521,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
}
}
-static void __attribute__((unused))
-tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
+static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, MemOp opc, bool is_64)
{
const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
@@ -1645,8 +1650,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[2];
#else
-unsigned a_bits, s_bits;
#endif
+unsigned a_bits, s_bits;
TCGReg base = TCG_REG_A0;
data_regl = *args++;
@ -1655,10 +1660,20 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
oi = *args++;
opc = get_memop(oi);
+a_bits = get_alignment_bits(opc);
+s_bits = opc & MO_SIZE;
+/*
+* R6 removes the left/right instructions but requires the
+* system to support misaligned memory accesses.
+*/
#if defined(CONFIG_SOFTMMU)
tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
-tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+if (use_mips32r6_instructions || a_bits >= s_bits) {
+tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+} else {
+tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
+}
add_qemu_ldst_label(s, 1, oi,
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
data_regl, data_regh, addr_regl, addr_regh,
@@ -1675,12 +1690,6 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
} else {
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
}
-a_bits = get_alignment_bits(opc);
-s_bits = opc & MO_SIZE;
-/*
-* R6 removes the left/right instructions but requires the
-* system to support misaligned memory accesses.
-*/
if (use_mips32r6_instructions) {
if (a_bits) {
tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
@@ -1760,8 +1769,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
}
}
-static void __attribute__((unused))
-tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
+static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, MemOp opc)
{
const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR;
@@ -1841,9 +1849,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[2];
-#else
-unsigned a_bits, s_bits;
#endif
+unsigned a_bits, s_bits;
TCGReg base = TCG_REG_A0;
data_regl = *args++;
@@ -1852,10 +1859,20 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
oi = *args++;
opc = get_memop(oi);
+a_bits = get_alignment_bits(opc);
+s_bits = opc & MO_SIZE;
+/*
+* R6 removes the left/right instructions but requires the
+* system to support misaligned memory accesses.
+*/
#if defined(CONFIG_SOFTMMU)
tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
-tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+if (use_mips32r6_instructions || a_bits >= s_bits) {
+tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+} else {
+tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
+}
add_qemu_ldst_label(s, 0, oi,
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
data_regl, data_regh, addr_regl, addr_regh,
@@ -1872,12 +1889,6 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
} else {
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
}
-a_bits = get_alignment_bits(opc);
-s_bits = opc & MO_SIZE;
-/*
-* R6 removes the left/right instructions but requires the
-* system to support misaligned memory accesses.
-*/
if (use_mips32r6_instructions) {
if (a_bits) {
tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);