target/riscv: vector mask-register logical instructions

Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20200701152549.1218-50-zhiwei_liu@c-sky.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
LIU Zhiwei 2020-07-01 23:25:37 +08:00 committed by Alistair Francis
parent 696b0c260a
commit c21f34aebf
4 changed files with 92 additions and 0 deletions

target/riscv/helper.h

@@ -1092,3 +1092,12 @@ DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmandnot_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmxor_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmor_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmornot_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmxnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
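Each DEF_HELPER_6 entry above declares a six-argument out-of-line TCG helper. For vmand.mm the generated prototype is roughly the following (parameter names taken from the vector_helper.c definitions later in this commit; ptr maps to void *, env to CPURISCVState *, i32 to uint32_t):

void helper_vmand_mm(void *vd, void *v0, void *vs1, void *vs2,
                     CPURISCVState *env, uint32_t desc);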

target/riscv/insn32.decode

@@ -547,6 +547,14 @@ vfredmin_vs     000101 . ..... ..... 001 ..... 1010111 @r_vm
vfredmax_vs     000111 . ..... ..... 001 ..... 1010111 @r_vm
# Vector widening ordered and unordered float reduction sum
vfwredsum_vs    1100-1 . ..... ..... 001 ..... 1010111 @r_vm
vmand_mm        011001 - ..... ..... 010 ..... 1010111 @r
vmnand_mm       011101 - ..... ..... 010 ..... 1010111 @r
vmandnot_mm     011000 - ..... ..... 010 ..... 1010111 @r
vmxor_mm        011011 - ..... ..... 010 ..... 1010111 @r
vmor_mm         011010 - ..... ..... 010 ..... 1010111 @r
vmnor_mm        011110 - ..... ..... 010 ..... 1010111 @r
vmornot_mm      011100 - ..... ..... 010 ..... 1010111 @r
vmxnor_mm       011111 - ..... ..... 010 ..... 1010111 @r
vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
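In these patterns the leading 6-bit field is funct6 (instruction bits 31:26), '-' is the vm bit position that the patterns leave unchecked, 010 is the OPMVV funct3, and 1010111 is the OP-V major opcode. As a standalone illustration of that layout (not part of the patch; the function name is made up), a by-hand match of vmand.mm would look like:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical check mirroring the decodetree pattern for vmand.mm. */
static bool is_vmand_mm(uint32_t insn)
{
    uint32_t opcode = insn & 0x7f;          /* bits [6:0]:   1010111 = OP-V */
    uint32_t funct3 = (insn >> 12) & 0x7;   /* bits [14:12]: 010 = OPMVV    */
    uint32_t funct6 = (insn >> 26) & 0x3f;  /* bits [31:26]: 011001         */

    return opcode == 0x57 && funct3 == 0x2 && funct6 == 0x19;
}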

target/riscv/insn_trans/trans_rvv.inc.c

@@ -2354,3 +2354,38 @@ GEN_OPFVV_TRANS(vfredmin_vs, reduction_check)
/* Vector Widening Floating-Point Reduction Instructions */
GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)

/*
 *** Vector Mask Operations
 */

/* Vector Mask-Register Logical Instructions */
#define GEN_MM_TRANS(NAME)                                         \
static bool trans_##NAME(DisasContext *s, arg_r *a)                \
{                                                                  \
    if (vext_check_isa_ill(s)) {                                   \
        uint32_t data = 0;                                         \
        gen_helper_gvec_4_ptr *fn = gen_helper_##NAME;             \
        TCGLabel *over = gen_new_label();                          \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
                                                                   \
        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), cpu_env, 0,        \
                           s->vlen / 8, data, fn);                 \
        gen_set_label(over);                                       \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_MM_TRANS(vmand_mm)
GEN_MM_TRANS(vmnand_mm)
GEN_MM_TRANS(vmandnot_mm)
GEN_MM_TRANS(vmxor_mm)
GEN_MM_TRANS(vmor_mm)
GEN_MM_TRANS(vmnor_mm)
GEN_MM_TRANS(vmornot_mm)
GEN_MM_TRANS(vmxnor_mm)
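For one concrete instantiation, GEN_MM_TRANS(vmand_mm) above expands to roughly the function below: when vl is zero the generated code branches straight to the 'over' label, otherwise the whole mask register operands are handed to the out-of-line helper via tcg_gen_gvec_4_ptr.

static bool trans_vmand_mm(DisasContext *s, arg_r *a)
{
    if (vext_check_isa_ill(s)) {
        uint32_t data = 0;
        gen_helper_gvec_4_ptr *fn = gen_helper_vmand_mm;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2), cpu_env, 0,
                           s->vlen / 8, data, fn);
        gen_set_label(over);
        return true;
    }
    return false;
}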

target/riscv/vector_helper.c

@@ -4502,3 +4502,43 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
    *((uint64_t *)vd) = s1;
    clearq(vd, 1, sizeof(uint64_t), tot);
}

/*
 *** Vector Mask Operations
 */

/* Vector Mask-Register Logical Instructions */
#define GEN_VEXT_MASK_VV(NAME, OP)                        \
void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    uint32_t mlen = vext_mlen(desc);                      \
    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;   \
    uint32_t vl = env->vl;                                \
    uint32_t i;                                           \
    int a, b;                                             \
                                                          \
    for (i = 0; i < vl; i++) {                            \
        a = vext_elem_mask(vs1, mlen, i);                 \
        b = vext_elem_mask(vs2, mlen, i);                 \
        vext_set_elem_mask(vd, mlen, i, OP(b, a));        \
    }                                                     \
    for (; i < vlmax; i++) {                              \
        vext_set_elem_mask(vd, mlen, i, 0);               \
    }                                                     \
}

#define DO_NAND(N, M)  (!(N & M))
#define DO_ANDNOT(N, M)  (N & !M)
#define DO_NOR(N, M)  (!(N | M))
#define DO_ORNOT(N, M)  (N | !M)
#define DO_XNOR(N, M)  (!(N ^ M))

GEN_VEXT_MASK_VV(vmand_mm, DO_AND)
GEN_VEXT_MASK_VV(vmnand_mm, DO_NAND)
GEN_VEXT_MASK_VV(vmandnot_mm, DO_ANDNOT)
GEN_VEXT_MASK_VV(vmxor_mm, DO_XOR)
GEN_VEXT_MASK_VV(vmor_mm, DO_OR)
GEN_VEXT_MASK_VV(vmnor_mm, DO_NOR)
GEN_VEXT_MASK_VV(vmornot_mm, DO_ORNOT)
GEN_VEXT_MASK_VV(vmxnor_mm, DO_XNOR)
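
vext_elem_mask returns a single mask bit (0 or 1), which is why the DO_* macros above use logical '!' rather than bitwise '~'; on 0/1 operands the two agree. Note also that the helper applies OP(b, a), i.e. vs2 OP vs1, matching the spec's operand order for instructions such as vmandnot.mm. A standalone sanity check of the 0/1 behaviour (illustration only, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Same definitions as in the patch, exercised on single-bit operands. */
#define DO_ANDNOT(N, M)  (N & !M)
#define DO_ORNOT(N, M)   (N | !M)

int main(void)
{
    for (int n = 0; n <= 1; n++) {
        for (int m = 0; m <= 1; m++) {
            /* On 0/1 inputs, !m and (~m & 1) are interchangeable. */
            assert(DO_ANDNOT(n, m) == (n & (~m & 1)));
            assert(DO_ORNOT(n, m)  == (n | (~m & 1)));
            printf("n=%d m=%d andnot=%d ornot=%d\n",
                   n, m, DO_ANDNOT(n, m), DO_ORNOT(n, m));
        }
    }
    return 0;
}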