mirror of
https://github.com/qemu/qemu.git
synced 2024-11-24 11:23:43 +08:00
host/include/i386: Implement aes-round.h
Detect AES in cpuinfo; implement the accel hooks. Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
28e91474ce
commit
d6a2443696
@ -26,6 +26,7 @@
|
||||
#define CPUINFO_AVX512VBMI2     (1u << 15)
#define CPUINFO_ATOMIC_VMOVDQA  (1u << 16)
#define CPUINFO_ATOMIC_VMOVDQU  (1u << 17)
/* Host has AESNI usable for acceleration; set only when SSSE3 is also
 * present, since the accel helpers rely on PSHUFB (see cpuinfo_init). */
#define CPUINFO_AES             (1u << 18)

/* Initialized with a constructor. */
extern unsigned cpuinfo;
|
||||
|
152
host/include/i386/host/crypto/aes-round.h
Normal file
152
host/include/i386/host/crypto/aes-round.h
Normal file
@ -0,0 +1,152 @@
|
||||
/*
 * x86 specific aes acceleration.
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef X86_HOST_CRYPTO_AES_ROUND_H
#define X86_HOST_CRYPTO_AES_ROUND_H

#include "host/cpuinfo.h"
#include <immintrin.h>

#if defined(__AES__) && defined(__SSSE3__)
/* Whole file compiled with AES+SSSE3 enabled: accel always available,
 * and the helpers need no per-function target attribute. */
# define HAVE_AES_ACCEL true
# define ATTR_AES_ACCEL
#else
/* Otherwise test at runtime (cpuinfo constructor sets CPUINFO_AES only
 * when both AESNI and SSSE3 are present), and compile each helper with
 * those ISA extensions enabled just for that function. */
# define HAVE_AES_ACCEL likely(cpuinfo & CPUINFO_AES)
# define ATTR_AES_ACCEL __attribute__((target("aes,ssse3")))
#endif
|
||||
|
||||
static inline __m128i ATTR_AES_ACCEL
|
||||
aes_accel_bswap(__m128i x)
|
||||
{
|
||||
return _mm_shuffle_epi8(x, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8,
|
||||
9, 10, 11, 12, 13, 14, 15));
|
||||
}
|
||||
|
||||
static inline void ATTR_AES_ACCEL
|
||||
aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
|
||||
{
|
||||
__m128i t = (__m128i)st->v;
|
||||
__m128i z = _mm_setzero_si128();
|
||||
|
||||
if (be) {
|
||||
t = aes_accel_bswap(t);
|
||||
t = _mm_aesdeclast_si128(t, z);
|
||||
t = _mm_aesenc_si128(t, z);
|
||||
t = aes_accel_bswap(t);
|
||||
} else {
|
||||
t = _mm_aesdeclast_si128(t, z);
|
||||
t = _mm_aesenc_si128(t, z);
|
||||
}
|
||||
ret->v = (AESStateVec)t;
|
||||
}
|
||||
|
||||
static inline void ATTR_AES_ACCEL
|
||||
aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
|
||||
const AESState *rk, bool be)
|
||||
{
|
||||
__m128i t = (__m128i)st->v;
|
||||
__m128i k = (__m128i)rk->v;
|
||||
|
||||
if (be) {
|
||||
t = aes_accel_bswap(t);
|
||||
k = aes_accel_bswap(k);
|
||||
t = _mm_aesenclast_si128(t, k);
|
||||
t = aes_accel_bswap(t);
|
||||
} else {
|
||||
t = _mm_aesenclast_si128(t, k);
|
||||
}
|
||||
ret->v = (AESStateVec)t;
|
||||
}
|
||||
|
||||
static inline void ATTR_AES_ACCEL
|
||||
aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
|
||||
const AESState *rk, bool be)
|
||||
{
|
||||
__m128i t = (__m128i)st->v;
|
||||
__m128i k = (__m128i)rk->v;
|
||||
|
||||
if (be) {
|
||||
t = aes_accel_bswap(t);
|
||||
k = aes_accel_bswap(k);
|
||||
t = _mm_aesenc_si128(t, k);
|
||||
t = aes_accel_bswap(t);
|
||||
} else {
|
||||
t = _mm_aesenc_si128(t, k);
|
||||
}
|
||||
ret->v = (AESStateVec)t;
|
||||
}
|
||||
|
||||
static inline void ATTR_AES_ACCEL
|
||||
aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
|
||||
{
|
||||
__m128i t = (__m128i)st->v;
|
||||
|
||||
if (be) {
|
||||
t = aes_accel_bswap(t);
|
||||
t = _mm_aesimc_si128(t);
|
||||
t = aes_accel_bswap(t);
|
||||
} else {
|
||||
t = _mm_aesimc_si128(t);
|
||||
}
|
||||
ret->v = (AESStateVec)t;
|
||||
}
|
||||
|
||||
static inline void ATTR_AES_ACCEL
|
||||
aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
|
||||
const AESState *rk, bool be)
|
||||
{
|
||||
__m128i t = (__m128i)st->v;
|
||||
__m128i k = (__m128i)rk->v;
|
||||
|
||||
if (be) {
|
||||
t = aes_accel_bswap(t);
|
||||
k = aes_accel_bswap(k);
|
||||
t = _mm_aesdeclast_si128(t, k);
|
||||
t = aes_accel_bswap(t);
|
||||
} else {
|
||||
t = _mm_aesdeclast_si128(t, k);
|
||||
}
|
||||
ret->v = (AESStateVec)t;
|
||||
}
|
||||
|
||||
static inline void ATTR_AES_ACCEL
|
||||
aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
|
||||
const AESState *rk, bool be)
|
||||
{
|
||||
__m128i t = (__m128i)st->v;
|
||||
__m128i k = (__m128i)rk->v;
|
||||
|
||||
if (be) {
|
||||
t = aes_accel_bswap(t);
|
||||
k = aes_accel_bswap(k);
|
||||
t = _mm_aesdeclast_si128(t, k);
|
||||
t = _mm_aesimc_si128(t);
|
||||
t = aes_accel_bswap(t);
|
||||
} else {
|
||||
t = _mm_aesdeclast_si128(t, k);
|
||||
t = _mm_aesimc_si128(t);
|
||||
}
|
||||
ret->v = (AESStateVec)t;
|
||||
}
|
||||
|
||||
static inline void ATTR_AES_ACCEL
|
||||
aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
|
||||
const AESState *rk, bool be)
|
||||
{
|
||||
__m128i t = (__m128i)st->v;
|
||||
__m128i k = (__m128i)rk->v;
|
||||
|
||||
if (be) {
|
||||
t = aes_accel_bswap(t);
|
||||
k = aes_accel_bswap(k);
|
||||
t = _mm_aesdec_si128(t, k);
|
||||
t = aes_accel_bswap(t);
|
||||
} else {
|
||||
t = _mm_aesdec_si128(t, k);
|
||||
}
|
||||
ret->v = (AESStateVec)t;
|
||||
}
|
||||
|
||||
#endif /* X86_HOST_CRYPTO_AES_ROUND_H */
|
1
host/include/x86_64/host/crypto/aes-round.h
Normal file
1
host/include/x86_64/host/crypto/aes-round.h
Normal file
@ -0,0 +1 @@
|
||||
#include "host/include/i386/host/crypto/aes-round.h"
|
@ -40,6 +40,9 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
|
||||
info |= (c & bit_MOVBE ? CPUINFO_MOVBE : 0);
|
||||
info |= (c & bit_POPCNT ? CPUINFO_POPCNT : 0);
|
||||
|
||||
/* Our AES support requires PSHUFB as well. */
|
||||
info |= ((c & bit_AES) && (c & bit_SSSE3) ? CPUINFO_AES : 0);
|
||||
|
||||
/* For AVX features, we must check available and usable. */
|
||||
if ((c & bit_AVX) && (c & bit_OSXSAVE)) {
|
||||
unsigned bv = xgetbv_low(0);
|
||||
|
Loading…
Reference in New Issue
Block a user