mirror of
https://github.com/qemu/qemu.git
synced 2024-12-05 09:43:44 +08:00
target/riscv: Refactor some of the generic vector functionality
Move some macros out of `vector_helper` and into `vector_internals`. This ensures they can be used by both vector and vector-crypto helpers (the latter implemented in subsequent commits). Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk> Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn> Signed-off-by: Max Chou <max.chou@sifive.com> Message-ID: <20230711165917.2629866-8-max.chou@sifive.com> Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
This commit is contained in:
parent
62cb3e8e88
commit
2152e48b50
@ -636,9 +636,6 @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
|
||||
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
|
||||
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
|
||||
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
|
||||
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
|
||||
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
|
||||
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
|
||||
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
|
||||
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
|
||||
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
|
||||
@ -3438,11 +3435,6 @@ GEN_VEXT_VF(vfwnmsac_vf_h, 4)
|
||||
GEN_VEXT_VF(vfwnmsac_vf_w, 8)
|
||||
|
||||
/* Vector Floating-Point Square-Root Instruction */
|
||||
/* (TD, T2, TX2) */
|
||||
#define OP_UU_H uint16_t, uint16_t, uint16_t
|
||||
#define OP_UU_W uint32_t, uint32_t, uint32_t
|
||||
#define OP_UU_D uint64_t, uint64_t, uint64_t
|
||||
|
||||
#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
|
||||
static void do_##NAME(void *vd, void *vs2, int i, \
|
||||
CPURISCVState *env) \
|
||||
@ -4139,40 +4131,6 @@ GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32)
|
||||
GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64)
|
||||
|
||||
/* Vector Floating-Point Classify Instruction */
|
||||
#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
|
||||
static void do_##NAME(void *vd, void *vs2, int i) \
|
||||
{ \
|
||||
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
||||
*((TD *)vd + HD(i)) = OP(s2); \
|
||||
}
|
||||
|
||||
#define GEN_VEXT_V(NAME, ESZ) \
|
||||
void HELPER(NAME)(void *vd, void *v0, void *vs2, \
|
||||
CPURISCVState *env, uint32_t desc) \
|
||||
{ \
|
||||
uint32_t vm = vext_vm(desc); \
|
||||
uint32_t vl = env->vl; \
|
||||
uint32_t total_elems = \
|
||||
vext_get_total_elems(env, desc, ESZ); \
|
||||
uint32_t vta = vext_vta(desc); \
|
||||
uint32_t vma = vext_vma(desc); \
|
||||
uint32_t i; \
|
||||
\
|
||||
for (i = env->vstart; i < vl; i++) { \
|
||||
if (!vm && !vext_elem_mask(v0, i)) { \
|
||||
/* set masked-off elements to 1s */ \
|
||||
vext_set_elems_1s(vd, vma, i * ESZ, \
|
||||
(i + 1) * ESZ); \
|
||||
continue; \
|
||||
} \
|
||||
do_##NAME(vd, vs2, i); \
|
||||
} \
|
||||
env->vstart = 0; \
|
||||
/* set tail elements to 1s */ \
|
||||
vext_set_elems_1s(vd, vta, vl * ESZ, \
|
||||
total_elems * ESZ); \
|
||||
}
|
||||
|
||||
target_ulong fclass_h(uint64_t frs1)
|
||||
{
|
||||
float16 f = frs1;
|
||||
|
@ -121,12 +121,52 @@ void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
|
||||
/* expand macro args before macro */
|
||||
#define RVVCALL(macro, ...) macro(__VA_ARGS__)
|
||||
|
||||
/* (TD, T2, TX2) */
|
||||
#define OP_UU_B uint8_t, uint8_t, uint8_t
|
||||
#define OP_UU_H uint16_t, uint16_t, uint16_t
|
||||
#define OP_UU_W uint32_t, uint32_t, uint32_t
|
||||
#define OP_UU_D uint64_t, uint64_t, uint64_t
|
||||
|
||||
/* (TD, T1, T2, TX1, TX2) */
|
||||
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
|
||||
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
|
||||
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
|
||||
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
|
||||
|
||||
#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
|
||||
static void do_##NAME(void *vd, void *vs2, int i) \
|
||||
{ \
|
||||
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
||||
*((TD *)vd + HD(i)) = OP(s2); \
|
||||
}
|
||||
|
||||
#define GEN_VEXT_V(NAME, ESZ) \
|
||||
void HELPER(NAME)(void *vd, void *v0, void *vs2, \
|
||||
CPURISCVState *env, uint32_t desc) \
|
||||
{ \
|
||||
uint32_t vm = vext_vm(desc); \
|
||||
uint32_t vl = env->vl; \
|
||||
uint32_t total_elems = \
|
||||
vext_get_total_elems(env, desc, ESZ); \
|
||||
uint32_t vta = vext_vta(desc); \
|
||||
uint32_t vma = vext_vma(desc); \
|
||||
uint32_t i; \
|
||||
\
|
||||
for (i = env->vstart; i < vl; i++) { \
|
||||
if (!vm && !vext_elem_mask(v0, i)) { \
|
||||
/* set masked-off elements to 1s */ \
|
||||
vext_set_elems_1s(vd, vma, i * ESZ, \
|
||||
(i + 1) * ESZ); \
|
||||
continue; \
|
||||
} \
|
||||
do_##NAME(vd, vs2, i); \
|
||||
} \
|
||||
env->vstart = 0; \
|
||||
/* set tail elements to 1s */ \
|
||||
vext_set_elems_1s(vd, vta, vl * ESZ, \
|
||||
total_elems * ESZ); \
|
||||
}
|
||||
|
||||
/* operation of two vector elements */
|
||||
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
|
||||
|
||||
@ -179,4 +219,10 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
|
||||
do_##NAME, ESZ); \
|
||||
}
|
||||
|
||||
/* Three of the widening shortening macros: */
|
||||
/* (TD, T1, T2, TX1, TX2) */
|
||||
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
|
||||
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
|
||||
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
|
||||
|
||||
#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
|
||||
|
Loading…
Reference in New Issue
Block a user