qemu/target/ppc/internal.h
Mark Cave-Ayland 8a14d31b00 target/ppc: switch fpr/vsrl registers so all VSX registers are in host endian order
When VSX support was initially added, the fpr registers were placed at
offset 0 of each VSR register and the vsrl registers at offset 1. This is
in contrast to the VMX registers (the last 32 VSX registers), which are
stored in host-endian order.

Switch the fpr/vsrl registers so that the lower 32 VSX registers are now also
stored in host-endian order, matching the VMX registers. This ensures that TCG
vector operations involving mixed VMX and VSX registers function correctly.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20190307180520.13868-7-mark.cave-ayland@ilande.co.uk>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2019-03-12 14:33:04 +11:00
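To illustrate the layout (a sketch based on the ppc_vsr_t accessors already used in this header; the real definitions live in target/ppc/cpu.h and may differ in detail), storing both halves in host-endian order lets the VsrD() accessor number doublewords consistently on either kind of host:

    /* Sketch only: approximate shape of the accessor assumed by getVSR() below. */
    #ifdef HOST_WORDS_BIGENDIAN
    #define VsrD(i) u64[i]          /* doubleword 0 is the most significant */
    #else
    #define VsrD(i) u64[1 - (i)]    /* flip the index on a little-endian host */
    #endif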

/*
 *  PowerPC internal definitions for qemu.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef PPC_INTERNAL_H
#define PPC_INTERNAL_H

#define FUNC_MASK(name, ret_type, size, max_val)                \
static inline ret_type name(uint##size##_t start,               \
                            uint##size##_t end)                 \
{                                                               \
    ret_type ret, max_bit = size - 1;                           \
                                                                \
    if (likely(start == 0)) {                                   \
        ret = max_val << (max_bit - end);                       \
    } else if (likely(end == max_bit)) {                        \
        ret = max_val >> start;                                 \
    } else {                                                    \
        ret = (((uint##size##_t)(-1ULL)) >> (start)) ^          \
            (((uint##size##_t)(-1ULL) >> (end)) >> 1);          \
        if (unlikely(start > end)) {                            \
            return ~ret;                                        \
        }                                                       \
    }                                                           \
                                                                \
    return ret;                                                 \
}
#if defined(TARGET_PPC64)
FUNC_MASK(MASK, target_ulong, 64, UINT64_MAX);
#else
FUNC_MASK(MASK, target_ulong, 32, UINT32_MAX);
#endif
FUNC_MASK(mask_u32, uint32_t, 32, UINT32_MAX);
FUNC_MASK(mask_u64, uint64_t, 64, UINT64_MAX);
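/*
 * Illustrative values, not part of the original header: FUNC_MASK uses the
 * big-endian (IBM) bit numbering of the rlwinm-style mask operands, so:
 *
 *   mask_u32(0, 7)   -> 0xff000000   (bits 0..7, the most significant byte)
 *   mask_u32(28, 31) -> 0x0000000f   (bits 28..31, the low nibble)
 *   mask_u32(28, 3)  -> 0xf000000f   (start > end wraps and selects both ends)
 */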
/*****************************************************************************/
/*** Instruction decoding ***/
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return extract32(opcode, shift, nb);                                      \
}

#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return sextract32(opcode, shift, nb);                                     \
}

#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2)                  \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return extract32(opcode, shift1, nb1) << nb2 |                            \
           extract32(opcode, shift2, nb2);                                    \
}

#define EXTRACT_HELPER_SPLIT_3(name,                                          \
                               d0_bits, shift_op_d0, shift_d0,                \
                               d1_bits, shift_op_d1, shift_d1,                \
                               d2_bits, shift_op_d2, shift_d2)                \
static inline int16_t name(uint32_t opcode)                                   \
{                                                                             \
    return                                                                    \
    (((opcode >> (shift_op_d0)) & ((1 << (d0_bits)) - 1)) << (shift_d0)) |    \
    (((opcode >> (shift_op_d1)) & ((1 << (d1_bits)) - 1)) << (shift_d1)) |    \
    (((opcode >> (shift_op_d2)) & ((1 << (d2_bits)) - 1)) << (shift_d2));     \
}
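/*
 * Worked example, not part of the original header: for the X-form
 * "add r3, r4, r5" (0x7c642a14), the decoders generated below give
 * opc1() = 31, rD() = 3, rA() = 4 and rB() = 5; the shift/nb arguments
 * are the usual little-endian bit offsets within the 32-bit opcode word.
 */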
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Opcode part 4 */
EXTRACT_HELPER(opc4, 16, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/*** Get CRn ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(BF, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
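/*
 * Worked example, not part of the original header: mfspr/mtspr encode the
 * SPR number with its two 5-bit halves swapped, so for LR (SPR 8) the raw
 * _SPR() field is 0x100 and SPR() swaps the halves back to 8.
 */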
/*** Get constants ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_SHELPER(SIMM5, 16, 5);
/* 5 bits signed immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* 4 bits unsigned immediate value */
EXTRACT_HELPER(UIMM4, 16, 4);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* lwat/stwat/ldat/lwat */
EXTRACT_HELPER(FC, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);
EXTRACT_HELPER(CRM, 12, 8);
#ifndef CONFIG_USER_ONLY
EXTRACT_HELPER(SR, 16, 4);
#endif
/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
/* addpcis */
EXTRACT_HELPER_SPLIT_3(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
#if defined(TARGET_PPC64)
/* darn */
EXTRACT_HELPER(L, 16, 2);
#endif
/*** Jump target decoding ***/
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
{
    return (opcode >> 0) & 0x03FFFFFC;
}

static inline uint32_t BD(uint32_t opcode)
{
    return (opcode >> 0) & 0xFFFC;
}
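/*
 * Illustrative value, not part of the original header: an unconditional
 * "b 0x100" assembles to 0x48000100, for which LI() returns 0x100.  The low
 * two bits of both LI() and BD() are always clear because instructions are
 * word aligned; sign extension of the displacement is left to the callers.
 */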
EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);
/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)
/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)
EXTRACT_HELPER(Rrm, 16, 1)
EXTRACT_HELPER_SPLIT(DQxT, 3, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
EXTRACT_HELPER(IMM8, 11, 8);
EXTRACT_HELPER(DCMX, 16, 7);
EXTRACT_HELPER_SPLIT_3(DCMX_XV, 5, 16, 0, 1, 2, 5, 1, 6, 6);
static inline void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
    vsr->VsrD(0) = env->vsr[n].VsrD(0);
    vsr->VsrD(1) = env->vsr[n].VsrD(1);
}

static inline void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
    env->vsr[n].VsrD(0) = vsr->VsrD(0);
    env->vsr[n].VsrD(1) = vsr->VsrD(1);
}
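/*
 * Usage sketch, not part of the original header: a VSX helper typically
 * copies its source registers into locals, computes a local result and
 * writes it back (the AND below is only a placeholder operation):
 *
 *   ppc_vsr_t xa, xb, xt;
 *
 *   getVSR(xA(opcode), &xa, env);
 *   getVSR(xB(opcode), &xb, env);
 *   xt.VsrD(0) = xa.VsrD(0) & xb.VsrD(0);
 *   xt.VsrD(1) = xa.VsrD(1) & xb.VsrD(1);
 *   putVSR(xT(opcode), &xt, env);
 */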
void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);
/* Raise a data fault alignment exception for the specified virtual address */
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);
#endif /* PPC_INTERNAL_H */