Merge tag 'v6.4-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Total usage stats now include all requests that returned errors
     (instead of just some)
   - Remove maximum hash statesize limit
   - Add cloning support for hmac and unkeyed hashes
   - Demote BUG_ON in crypto_unregister_alg to a WARN_ON

  Algorithms:
   - Use RIP-relative addressing on x86 to prepare for PIE build
   - Add accelerated AES/GCM stitched implementation on powerpc P10
   - Add some test vectors for cmac(camellia)
   - Remove failure case where jent is unavailable outside of FIPS mode
     in drbg
   - Add permanent and intermittent health error checks in jitter RNG

  Drivers:
   - Add support for 402xx devices in qat
   - Add support for HiSTB TRNG
   - Fix hash concurrency issues in stm32
   - Add OP-TEE firmware support in caam"

* tag 'v6.4-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (139 commits)
  i2c: designware: Add doorbell support for Mendocino
  i2c: designware: Use PCI PSP driver for communication
  powerpc: Move Power10 feature PPC_MODULE_FEATURE_P10
  crypto: p10-aes-gcm - Remove POWER10_CPU dependency
  crypto: testmgr - Add some test vectors for cmac(camellia)
  crypto: cryptd - Add support for cloning hashes
  crypto: cryptd - Convert hash to use modern init_tfm/exit_tfm
  crypto: hmac - Add support for cloning
  crypto: hash - Add crypto_clone_ahash/shash
  crypto: api - Add crypto_clone_tfm
  crypto: api - Add crypto_tfm_get
  crypto: x86/sha - Use local .L symbols for code
  crypto: x86/crc32 - Use local .L symbols for code
  crypto: x86/aesni - Use local .L symbols for code
  crypto: x86/sha256 - Use RIP-relative addressing
  crypto: x86/ghash - Use RIP-relative addressing
  crypto: x86/des3 - Use RIP-relative addressing
  crypto: x86/crc32c - Use RIP-relative addressing
  crypto: x86/cast6 - Use RIP-relative addressing
  crypto: x86/cast5 - Use RIP-relative addressing
  ...
Merged by Linus Torvalds on 2023-04-26 08:32:52 -07:00 in commit 733f7e9c18.
257 changed files with 6781 additions and 2735 deletions.

@@ -1,25 +0,0 @@
Qualcomm crypto engine driver
Required properties:
- compatible : should be "qcom,crypto-v5.1"
- reg : specifies base physical address and size of the registers map
- clocks : phandle to clock-controller plus clock-specifier pair
- clock-names : "iface" clocks register interface
"bus" clocks data transfer interface
"core" clocks rest of the crypto block
- dmas : DMA specifiers for tx and rx dma channels. For more see
Documentation/devicetree/bindings/dma/dma.txt
- dma-names : DMA request names should be "rx" and "tx"
Example:
crypto@fd45a000 {
compatible = "qcom,crypto-v5.1";
reg = <0xfd45a000 0x6000>;
clocks = <&gcc GCC_CE2_AHB_CLK>,
<&gcc GCC_CE2_AXI_CLK>,
<&gcc GCC_CE2_CLK>;
clock-names = "iface", "bus", "core";
dmas = <&cryptobam 2>, <&cryptobam 3>;
dma-names = "rx", "tx";
};


@@ -0,0 +1,123 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/qcom-qce.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Qualcomm crypto engine driver

maintainers:
  - Bhupesh Sharma <bhupesh.sharma@linaro.org>

description:
  This document defines the binding for the QCE crypto
  controller found on Qualcomm parts.

properties:
  compatible:
    oneOf:
      - const: qcom,crypto-v5.1
        deprecated: true
        description: Kept only for ABI backward compatibility

      - const: qcom,crypto-v5.4
        deprecated: true
        description: Kept only for ABI backward compatibility

      - items:
          - enum:
              - qcom,ipq6018-qce
              - qcom,ipq8074-qce
              - qcom,msm8996-qce
              - qcom,sdm845-qce
          - const: qcom,ipq4019-qce
          - const: qcom,qce

      - items:
          - enum:
              - qcom,sm8250-qce
              - qcom,sm8350-qce
              - qcom,sm8450-qce
              - qcom,sm8550-qce
          - const: qcom,sm8150-qce
          - const: qcom,qce

  reg:
    maxItems: 1

  clocks:
    items:
      - description: iface clocks register interface.
      - description: bus clocks data transfer interface.
      - description: core clocks rest of the crypto block.

  clock-names:
    items:
      - const: iface
      - const: bus
      - const: core

  iommus:
    minItems: 1
    maxItems: 8
    description:
      phandle to apps_smmu node with sid mask.

  interconnects:
    maxItems: 1
    description:
      Interconnect path between qce crypto and main memory.

  interconnect-names:
    const: memory

  dmas:
    items:
      - description: DMA specifiers for rx dma channel.
      - description: DMA specifiers for tx dma channel.

  dma-names:
    items:
      - const: rx
      - const: tx

allOf:
  - if:
      properties:
        compatible:
          contains:
            enum:
              - qcom,crypto-v5.1
              - qcom,crypto-v5.4
              - qcom,ipq4019-qce
    then:
      required:
        - clocks
        - clock-names

required:
  - compatible
  - reg
  - dmas
  - dma-names

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/qcom,gcc-apq8084.h>
    crypto-engine@fd45a000 {
        compatible = "qcom,ipq6018-qce", "qcom,ipq4019-qce", "qcom,qce";
        reg = <0xfd45a000 0x6000>;
        clocks = <&gcc GCC_CE2_AHB_CLK>,
                 <&gcc GCC_CE2_AXI_CLK>,
                 <&gcc GCC_CE2_CLK>;
        clock-names = "iface", "bus", "core";
        dmas = <&cryptobam 2>, <&cryptobam 3>;
        dma-names = "rx", "tx";
        iommus = <&apps_smmu 0x584 0x0011>,
                 <&apps_smmu 0x586 0x0011>,
                 <&apps_smmu 0x594 0x0011>,
                 <&apps_smmu 0x596 0x0011>;
    };


@@ -2269,7 +2269,7 @@ F: arch/arm/boot/dts/intel-ixp*
F: arch/arm/mach-ixp4xx/
F: drivers/bus/intel-ixp4xx-eb.c
F: drivers/clocksource/timer-ixp4xx.c
F: drivers/crypto/ixp4xx_crypto.c
F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
F: drivers/gpio/gpio-ixp4xx.c
F: drivers/irqchip/irq-ixp4xx.c
@@ -10391,7 +10391,7 @@ INTEL IXP4XX CRYPTO SUPPORT
M: Corentin Labbe <clabbe@baylibre.com>
L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/ixp4xx_crypto.c
F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
INTEL ISHTP ECLITE DRIVER
M: Sumesh K Naduvalath <sumesh.k.naduvalath@intel.com>
@@ -10426,11 +10426,11 @@ INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-aes.yaml
F: drivers/crypto/keembay/Kconfig
F: drivers/crypto/keembay/Makefile
F: drivers/crypto/keembay/keembay-ocs-aes-core.c
F: drivers/crypto/keembay/ocs-aes.c
F: drivers/crypto/keembay/ocs-aes.h
F: drivers/crypto/intel/keembay/Kconfig
F: drivers/crypto/intel/keembay/Makefile
F: drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
F: drivers/crypto/intel/keembay/ocs-aes.c
F: drivers/crypto/intel/keembay/ocs-aes.h
INTEL KEEM BAY OCS ECC CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
@@ -10438,20 +10438,20 @@ M: Prabhjot Khurana <prabhjot.khurana@intel.com>
M: Mark Gross <mgross@linux.intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-ecc.yaml
F: drivers/crypto/keembay/Kconfig
F: drivers/crypto/keembay/Makefile
F: drivers/crypto/keembay/keembay-ocs-ecc.c
F: drivers/crypto/intel/keembay/Kconfig
F: drivers/crypto/intel/keembay/Makefile
F: drivers/crypto/intel/keembay/keembay-ocs-ecc.c
INTEL KEEM BAY OCS HCU CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
M: Declan Murphy <declan.murphy@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml
F: drivers/crypto/keembay/Kconfig
F: drivers/crypto/keembay/Makefile
F: drivers/crypto/keembay/keembay-ocs-hcu-core.c
F: drivers/crypto/keembay/ocs-hcu.c
F: drivers/crypto/keembay/ocs-hcu.h
F: drivers/crypto/intel/keembay/Kconfig
F: drivers/crypto/intel/keembay/Makefile
F: drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
F: drivers/crypto/intel/keembay/ocs-hcu.c
F: drivers/crypto/intel/keembay/ocs-hcu.h
INTEL THUNDER BAY EMMC PHY DRIVER
M: Nandhini Srikandan <nandhini.srikandan@intel.com>
@@ -17027,7 +17027,7 @@ QAT DRIVER
M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
L: qat-linux@intel.com
S: Supported
F: drivers/crypto/qat/
F: drivers/crypto/intel/qat/
QCOM AUDIO (ASoC) DRIVERS
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -17295,6 +17295,7 @@ M: Thara Gopinath <thara.gopinath@gmail.com>
L: linux-crypto@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/crypto/qcom-qce.yaml
F: drivers/crypto/qce/
QUALCOMM EMAC GIGABIT ETHERNET DRIVER


@@ -1850,7 +1850,7 @@
};
crypto: crypto@1de0000 {
compatible = "qcom,sm8550-qce";
compatible = "qcom,sm8550-qce", "qcom,sm8150-qce", "qcom,qce";
reg = <0x0 0x01dfa000 0x0 0x6000>;
dmas = <&cryptobam 4>, <&cryptobam 5>;
dma-names = "rx", "tx";


@@ -15,6 +15,7 @@
*/
#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
.text
@@ -620,12 +621,12 @@ SYM_FUNC_END(aesbs_decrypt8)
.endm
.align 4
SYM_FUNC_START(aesbs_ecb_encrypt)
SYM_TYPED_FUNC_START(aesbs_ecb_encrypt)
__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
SYM_FUNC_END(aesbs_ecb_encrypt)
.align 4
SYM_FUNC_START(aesbs_ecb_decrypt)
SYM_TYPED_FUNC_START(aesbs_ecb_decrypt)
__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
SYM_FUNC_END(aesbs_ecb_decrypt)
@@ -799,11 +800,11 @@ SYM_FUNC_END(__xts_crypt8)
ret
.endm
SYM_FUNC_START(aesbs_xts_encrypt)
SYM_TYPED_FUNC_START(aesbs_xts_encrypt)
__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
SYM_FUNC_END(aesbs_xts_encrypt)
SYM_FUNC_START(aesbs_xts_decrypt)
SYM_TYPED_FUNC_START(aesbs_xts_decrypt)
__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
SYM_FUNC_END(aesbs_xts_decrypt)


@@ -94,4 +94,21 @@ config CRYPTO_AES_PPC_SPE
	  architecture specific assembler implementations that work on 1KB
	  tables or 256 bytes S-boxes.

config CRYPTO_AES_GCM_P10
	tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
	depends on PPC64 && CPU_LITTLE_ENDIAN
	select CRYPTO_LIB_AES
	select CRYPTO_ALGAPI
	select CRYPTO_AEAD
	default m
	help
	  AEAD cipher: AES cipher algorithms (FIPS-197)
	  GCM (Galois/Counter Mode) authenticated encryption mode (NIST SP800-38D)
	  Architecture: powerpc64 using:
	   - little-endian
	   - Power10 or later features

	  Support for cryptographic acceleration instructions on Power10 or
	  later CPU. This module supports stitched acceleration for AES/GCM.

endmenu
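
The new module registers as just another "gcm(aes)" implementation (driver name aes_gcm_p10, cra_priority 2100 in the glue code further down), so callers reach it through the ordinary algorithm lookup rather than anything Power10-specific. A hedged sketch of that lookup — standard crypto API usage, not code from this series:

#include <crypto/aead.h>

/* Allocating by algorithm name picks the highest-priority registered
 * implementation; on a Power10 machine with CRYPTO_AES_GCM_P10 loaded
 * that is the stitched aes_gcm_p10 driver. */
static struct crypto_aead *alloc_gcm_aes(void)
{
	return crypto_alloc_aead("gcm(aes)", 0, 0);
}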


@@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
@@ -21,3 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o
quiet_cmd_perl = PERL    $@
      cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@

targets += aesp8-ppc.S ghashp8-ppc.S

$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
	$(call if_changed,perl)
OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y


@@ -0,0 +1,343 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue code for accelerated AES-GCM stitched implementation for ppc64le.
*
* Copyright 2022- IBM Inc. All rights reserved
*/
#include <asm/unaligned.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/types.h>
#define PPC_ALIGN 16
#define GCM_IV_SIZE 12
MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");
asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
void *key);
asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
unsigned char *aad, unsigned int alen);
struct aes_key {
u8 key[AES_MAX_KEYLENGTH];
u64 rounds;
};
struct gcm_ctx {
u8 iv[16];
u8 ivtag[16];
u8 aad_hash[16];
u64 aadLen;
u64 Plen; /* offset 56 - used in aes_p10_gcm_{en/de}crypt */
};
struct Hash_ctx {
u8 H[16]; /* subkey */
u8 Htable[256]; /* Xi, Hash table(offset 32) */
};
struct p10_aes_gcm_ctx {
struct aes_key enc_key;
};
static void vsx_begin(void)
{
preempt_disable();
enable_kernel_vsx();
}
static void vsx_end(void)
{
disable_kernel_vsx();
preempt_enable();
}
static void set_subkey(unsigned char *hash)
{
*(u64 *)&hash[0] = be64_to_cpup((__be64 *)&hash[0]);
*(u64 *)&hash[8] = be64_to_cpup((__be64 *)&hash[8]);
}
/*
* Compute aad if any.
* - Hash aad and copy to Xi.
*/
static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
unsigned char *aad, int alen)
{
int i;
u8 nXi[16] = {0, };
gctx->aadLen = alen;
i = alen & ~0xf;
if (i) {
gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
aad += i;
alen -= i;
}
if (alen) {
for (i = 0; i < alen; i++)
nXi[i] ^= aad[i];
memset(gctx->aad_hash, 0, 16);
gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
} else {
memcpy(gctx->aad_hash, nXi, 16);
}
memcpy(hash->Htable, gctx->aad_hash, 16);
}
static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
struct Hash_ctx *hash, u8 *assoc, unsigned int assoclen)
{
__be32 counter = cpu_to_be32(1);
aes_p8_encrypt(hash->H, hash->H, rdkey);
set_subkey(hash->H);
gcm_init_htable(hash->Htable+32, hash->H);
*((__be32 *)(iv+12)) = counter;
gctx->Plen = 0;
/*
* Encrypt counter vector as iv tag and increment counter.
*/
aes_p8_encrypt(iv, gctx->ivtag, rdkey);
counter = cpu_to_be32(2);
*((__be32 *)(iv+12)) = counter;
memcpy(gctx->iv, iv, 16);
gctx->aadLen = assoclen;
memset(gctx->aad_hash, 0, 16);
if (assoclen)
set_aad(gctx, hash, assoc, assoclen);
}
static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
{
int i;
unsigned char len_ac[16 + PPC_ALIGN];
unsigned char *aclen = PTR_ALIGN((void *)len_ac, PPC_ALIGN);
__be64 clen = cpu_to_be64(len << 3);
__be64 alen = cpu_to_be64(gctx->aadLen << 3);
if (len == 0 && gctx->aadLen == 0) {
memcpy(hash->Htable, gctx->ivtag, 16);
return;
}
/*
* Len is in bits.
*/
*((__be64 *)(aclen)) = alen;
*((__be64 *)(aclen+8)) = clen;
/*
* hash (AAD len and len)
*/
gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);
for (i = 0; i < 16; i++)
hash->Htable[i] ^= gctx->ivtag[i];
}
static int set_authsize(struct crypto_aead *tfm, unsigned int authsize)
{
switch (authsize) {
case 4:
case 8:
case 12:
case 13:
case 14:
case 15:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
vsx_begin();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
vsx_end();
return ret ? -EINVAL : 0;
}
static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
{
struct crypto_tfm *tfm = req->base.tfm;
struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
struct scatter_walk assoc_sg_walk;
struct skcipher_walk walk;
u8 *assocmem = NULL;
u8 *assoc;
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
int ret;
unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
u8 otag[16];
int total_processed = 0;
memset(databuf, 0, sizeof(databuf));
memset(hashbuf, 0, sizeof(hashbuf));
memset(ivbuf, 0, sizeof(ivbuf));
memcpy(iv, req->iv, GCM_IV_SIZE);
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length) {
scatterwalk_start(&assoc_sg_walk, req->src);
assoc = scatterwalk_map(&assoc_sg_walk);
} else {
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
/* assoc can be any length, so must be on heap */
assocmem = kmalloc(assoclen, flags);
if (unlikely(!assocmem))
return -ENOMEM;
assoc = assocmem;
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
}
vsx_begin();
gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
vsx_end();
if (!assocmem)
scatterwalk_unmap(assoc);
else
kfree(assocmem);
if (enc)
ret = skcipher_walk_aead_encrypt(&walk, req, false);
else
ret = skcipher_walk_aead_decrypt(&walk, req, false);
if (ret)
return ret;
while (walk.nbytes > 0 && ret == 0) {
vsx_begin();
if (enc)
aes_p10_gcm_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
walk.nbytes,
&ctx->enc_key, gctx->iv, hash->Htable);
else
aes_p10_gcm_decrypt(walk.src.virt.addr,
walk.dst.virt.addr,
walk.nbytes,
&ctx->enc_key, gctx->iv, hash->Htable);
vsx_end();
total_processed += walk.nbytes;
ret = skcipher_walk_done(&walk, 0);
}
if (ret)
return ret;
/* Finalize hash */
vsx_begin();
finish_tag(gctx, hash, total_processed);
vsx_end();
/* copy Xi to end of dst */
if (enc)
scatterwalk_map_and_copy(hash->Htable, req->dst, req->assoclen + cryptlen,
auth_tag_len, 1);
else {
scatterwalk_map_and_copy(otag, req->src,
req->assoclen + cryptlen - auth_tag_len,
auth_tag_len, 0);
if (crypto_memneq(otag, hash->Htable, auth_tag_len)) {
memzero_explicit(hash->Htable, 16);
return -EBADMSG;
}
}
return 0;
}
static int p10_aes_gcm_encrypt(struct aead_request *req)
{
return p10_aes_gcm_crypt(req, 1);
}
static int p10_aes_gcm_decrypt(struct aead_request *req)
{
return p10_aes_gcm_crypt(req, 0);
}
static struct aead_alg gcm_aes_alg = {
.ivsize = GCM_IV_SIZE,
.maxauthsize = 16,
.setauthsize = set_authsize,
.setkey = p10_aes_gcm_setkey,
.encrypt = p10_aes_gcm_encrypt,
.decrypt = p10_aes_gcm_decrypt,
.base.cra_name = "gcm(aes)",
.base.cra_driver_name = "aes_gcm_p10",
.base.cra_priority = 2100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx),
.base.cra_module = THIS_MODULE,
};
static int __init p10_init(void)
{
return crypto_register_aead(&gcm_aes_alg);
}
static void __exit p10_exit(void)
{
crypto_unregister_aead(&gcm_aes_alg);
}
module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init);
module_exit(p10_exit);
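
For reference, finish_tag() above is the standard GCM closing step: S = GHASH_H(AAD || C || len64(AAD) || len64(C)) followed by T = S xor E(K, Y0), with both lengths expressed in bits. A small standalone sketch of the 16-byte length block it hands to the final gcm_ghash_p8() call (the helper name is illustrative, not from the file above):

#include <asm/byteorder.h>
#include <linux/string.h>
#include <linux/types.h>

/* Pack the big-endian 64-bit *bit* lengths of the AAD and the text
 * into one GHASH block, as finish_tag() builds in aclen. */
static void gcm_len_block(u8 block[16], u64 aad_bytes, u64 text_bytes)
{
	__be64 alen = cpu_to_be64(aad_bytes << 3);
	__be64 clen = cpu_to_be64(text_bytes << 3);

	memcpy(block, &alen, sizeof(alen));
	memcpy(block + 8, &clen, sizeof(clen));
}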

(File diff suppressed because it is too large.)


@@ -0,0 +1,585 @@
#! /usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
# This code is taken from CRYPTOGAMs[1] and is included here using the option
# in the license to distribute the code under the GPL. Therefore this program
# is free software; you can redistribute it and/or modify it under the terms of
# the GNU General Public License version 2 as published by the Free Software
# Foundation.
#
# [1] https://www.openssl.org/~appro/cryptogams/
# Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain copyright notices,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# * Neither the name of the CRYPTOGAMS nor the names of its
# copyright holder and contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# ALTERNATIVELY, provided that this notice is retained in full, this
# product may be distributed under the terms of the GNU General Public
# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
# those given above.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see https://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements support for AES instructions as per PowerISA
# specification version 2.07, first implemented by POWER8 processor.
# The module is endian-agnostic in sense that it supports both big-
# and little-endian cases. Data alignment in parallelizable modes is
# handled with VSX loads and stores, which implies MSR.VSX flag being
# set. It should also be noted that ISA specification doesn't prohibit
# alignment exceptions for these instructions on page boundaries.
# Initially alignment was handled in pure AltiVec/VMX way [when data
# is aligned programmatically, which in turn guarantees exception-
# free execution], but it turned to hamper performance when vcipher
# instructions are interleaved. It's reckoned that eventual
# misalignment penalties at page boundaries are in average lower
# than additional overhead in pure AltiVec approach.
#
# May 2016
#
# Add XTS subroutine, 9x on little- and 12x improvement on big-endian
# systems were measured.
#
######################################################################
# Current large-block performance in cycles per byte processed with
# 128-bit key (less is better).
#
#                CBC en-/decrypt    CTR     XTS
# POWER8[le]     3.96/0.72          0.74    1.1
# POWER8[be]     3.75/0.65          0.66    1.0
$flavour = shift;
if ($flavour =~ /64/) {
$SIZE_T =8;
$LRSAVE =2*$SIZE_T;
$STU ="stdu";
$POP ="ld";
$PUSH ="std";
$UCMP ="cmpld";
$SHL ="sldi";
} elsif ($flavour =~ /32/) {
$SIZE_T =4;
$LRSAVE =$SIZE_T;
$STU ="stwu";
$POP ="lwz";
$PUSH ="stw";
$UCMP ="cmplw";
$SHL ="slwi";
} else { die "nonsense $flavour"; }
$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
$FRAME=8*$SIZE_T;
$prefix="aes_p8";
$sp="r1";
$vrsave="r12";
#########################################################################
{{{ # Key setup procedures #
my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
$code.=<<___;
.machine "any"
.text
.align 7
rcon:
.long 0x01000000, 0x01000000, 0x01000000, 0x01000000 ?rev
.long 0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000 ?rev
.long 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c ?rev
.long 0,0,0,0 ?asis
Lconsts:
mflr r0
bcl 20,31,\$+4
mflr $ptr #vvvvv "distance between . and rcon
addi $ptr,$ptr,-0x48
mtlr r0
blr
.long 0
.byte 0,12,0x14,0,0,0,0,0
.asciz "AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
.globl .${prefix}_set_encrypt_key
Lset_encrypt_key:
mflr r11
$PUSH r11,$LRSAVE($sp)
li $ptr,-1
${UCMP}i $inp,0
beq- Lenc_key_abort # if ($inp==0) return -1;
${UCMP}i $out,0
beq- Lenc_key_abort # if ($out==0) return -1;
li $ptr,-2
cmpwi $bits,128
blt- Lenc_key_abort
cmpwi $bits,256
bgt- Lenc_key_abort
andi. r0,$bits,0x3f
bne- Lenc_key_abort
lis r0,0xfff0
mfspr $vrsave,256
mtspr 256,r0
bl Lconsts
mtlr r11
neg r9,$inp
lvx $in0,0,$inp
addi $inp,$inp,15 # 15 is not typo
lvsr $key,0,r9 # borrow $key
li r8,0x20
cmpwi $bits,192
lvx $in1,0,$inp
le?vspltisb $mask,0x0f # borrow $mask
lvx $rcon,0,$ptr
le?vxor $key,$key,$mask # adjust for byte swap
lvx $mask,r8,$ptr
addi $ptr,$ptr,0x10
vperm $in0,$in0,$in1,$key # align [and byte swap in LE]
li $cnt,8
vxor $zero,$zero,$zero
mtctr $cnt
?lvsr $outperm,0,$out
vspltisb $outmask,-1
lvx $outhead,0,$out
?vperm $outmask,$zero,$outmask,$outperm
blt Loop128
addi $inp,$inp,8
beq L192
addi $inp,$inp,8
b L256
.align 4
Loop128:
vperm $key,$in0,$in0,$mask # rotate-n-splat
vsldoi $tmp,$zero,$in0,12 # >>32
vperm $outtail,$in0,$in0,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
vcipherlast $key,$key,$rcon
stvx $stage,0,$out
addi $out,$out,16
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vadduwm $rcon,$rcon,$rcon
vxor $in0,$in0,$key
bdnz Loop128
lvx $rcon,0,$ptr # last two round keys
vperm $key,$in0,$in0,$mask # rotate-n-splat
vsldoi $tmp,$zero,$in0,12 # >>32
vperm $outtail,$in0,$in0,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
vcipherlast $key,$key,$rcon
stvx $stage,0,$out
addi $out,$out,16
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vadduwm $rcon,$rcon,$rcon
vxor $in0,$in0,$key
vperm $key,$in0,$in0,$mask # rotate-n-splat
vsldoi $tmp,$zero,$in0,12 # >>32
vperm $outtail,$in0,$in0,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
vcipherlast $key,$key,$rcon
stvx $stage,0,$out
addi $out,$out,16
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vxor $in0,$in0,$key
vperm $outtail,$in0,$in0,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
stvx $stage,0,$out
addi $inp,$out,15 # 15 is not typo
addi $out,$out,0x50
li $rounds,10
b Ldone
.align 4
L192:
lvx $tmp,0,$inp
li $cnt,4
vperm $outtail,$in0,$in0,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
stvx $stage,0,$out
addi $out,$out,16
vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
vspltisb $key,8 # borrow $key
mtctr $cnt
vsububm $mask,$mask,$key # adjust the mask
Loop192:
vperm $key,$in1,$in1,$mask # rotate-n-splat
vsldoi $tmp,$zero,$in0,12 # >>32
vcipherlast $key,$key,$rcon
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vsldoi $stage,$zero,$in1,8
vspltw $tmp,$in0,3
vxor $tmp,$tmp,$in1
vsldoi $in1,$zero,$in1,12 # >>32
vadduwm $rcon,$rcon,$rcon
vxor $in1,$in1,$tmp
vxor $in0,$in0,$key
vxor $in1,$in1,$key
vsldoi $stage,$stage,$in0,8
vperm $key,$in1,$in1,$mask # rotate-n-splat
vsldoi $tmp,$zero,$in0,12 # >>32
vperm $outtail,$stage,$stage,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
vcipherlast $key,$key,$rcon
stvx $stage,0,$out
addi $out,$out,16
vsldoi $stage,$in0,$in1,8
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vperm $outtail,$stage,$stage,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
stvx $stage,0,$out
addi $out,$out,16
vspltw $tmp,$in0,3
vxor $tmp,$tmp,$in1
vsldoi $in1,$zero,$in1,12 # >>32
vadduwm $rcon,$rcon,$rcon
vxor $in1,$in1,$tmp
vxor $in0,$in0,$key
vxor $in1,$in1,$key
vperm $outtail,$in0,$in0,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
stvx $stage,0,$out
addi $inp,$out,15 # 15 is not typo
addi $out,$out,16
bdnz Loop192
li $rounds,12
addi $out,$out,0x20
b Ldone
.align 4
L256:
lvx $tmp,0,$inp
li $cnt,7
li $rounds,14
vperm $outtail,$in0,$in0,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
stvx $stage,0,$out
addi $out,$out,16
vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
mtctr $cnt
Loop256:
vperm $key,$in1,$in1,$mask # rotate-n-splat
vsldoi $tmp,$zero,$in0,12 # >>32
vperm $outtail,$in1,$in1,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
vcipherlast $key,$key,$rcon
stvx $stage,0,$out
addi $out,$out,16
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in0,$in0,$tmp
vadduwm $rcon,$rcon,$rcon
vxor $in0,$in0,$key
vperm $outtail,$in0,$in0,$outperm # rotate
vsel $stage,$outhead,$outtail,$outmask
vmr $outhead,$outtail
stvx $stage,0,$out
addi $inp,$out,15 # 15 is not typo
addi $out,$out,16
bdz Ldone
vspltw $key,$in0,3 # just splat
vsldoi $tmp,$zero,$in1,12 # >>32
vsbox $key,$key
vxor $in1,$in1,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in1,$in1,$tmp
vsldoi $tmp,$zero,$tmp,12 # >>32
vxor $in1,$in1,$tmp
vxor $in1,$in1,$key
b Loop256
.align 4
Ldone:
lvx $in1,0,$inp # redundant in aligned case
vsel $in1,$outhead,$in1,$outmask
stvx $in1,0,$inp
li $ptr,0
mtspr 256,$vrsave
stw $rounds,0($out)
Lenc_key_abort:
mr r3,$ptr
blr
.long 0
.byte 0,12,0x14,1,0,0,3,0
.long 0
.size .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
.globl .${prefix}_set_decrypt_key
$STU $sp,-$FRAME($sp)
mflr r10
$PUSH r10,$FRAME+$LRSAVE($sp)
bl Lset_encrypt_key
mtlr r10
cmpwi r3,0
bne- Ldec_key_abort
slwi $cnt,$rounds,4
subi $inp,$out,240 # first round key
srwi $rounds,$rounds,1
add $out,$inp,$cnt # last round key
mtctr $rounds
Ldeckey:
lwz r0, 0($inp)
lwz r6, 4($inp)
lwz r7, 8($inp)
lwz r8, 12($inp)
addi $inp,$inp,16
lwz r9, 0($out)
lwz r10,4($out)
lwz r11,8($out)
lwz r12,12($out)
stw r0, 0($out)
stw r6, 4($out)
stw r7, 8($out)
stw r8, 12($out)
subi $out,$out,16
stw r9, -16($inp)
stw r10,-12($inp)
stw r11,-8($inp)
stw r12,-4($inp)
bdnz Ldeckey
xor r3,r3,r3 # return value
Ldec_key_abort:
addi $sp,$sp,$FRAME
blr
.long 0
.byte 0,12,4,1,0x80,0,3,0
.long 0
.size .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
___
}}}
#########################################################################
{{{ # Single block en- and decrypt procedures #
sub gen_block () {
my $dir = shift;
my $n = $dir eq "de" ? "n" : "";
my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
$code.=<<___;
.globl .${prefix}_${dir}crypt
lwz $rounds,240($key)
lis r0,0xfc00
mfspr $vrsave,256
li $idx,15 # 15 is not typo
mtspr 256,r0
lvx v0,0,$inp
neg r11,$out
lvx v1,$idx,$inp
lvsl v2,0,$inp # inpperm
le?vspltisb v4,0x0f
?lvsl v3,0,r11 # outperm
le?vxor v2,v2,v4
li $idx,16
vperm v0,v0,v1,v2 # align [and byte swap in LE]
lvx v1,0,$key
?lvsl v5,0,$key # keyperm
srwi $rounds,$rounds,1
lvx v2,$idx,$key
addi $idx,$idx,16
subi $rounds,$rounds,1
?vperm v1,v1,v2,v5 # align round key
vxor v0,v0,v1
lvx v1,$idx,$key
addi $idx,$idx,16
mtctr $rounds
Loop_${dir}c:
?vperm v2,v2,v1,v5
v${n}cipher v0,v0,v2
lvx v2,$idx,$key
addi $idx,$idx,16
?vperm v1,v1,v2,v5
v${n}cipher v0,v0,v1
lvx v1,$idx,$key
addi $idx,$idx,16
bdnz Loop_${dir}c
?vperm v2,v2,v1,v5
v${n}cipher v0,v0,v2
lvx v2,$idx,$key
?vperm v1,v1,v2,v5
v${n}cipherlast v0,v0,v1
vspltisb v2,-1
vxor v1,v1,v1
li $idx,15 # 15 is not typo
?vperm v2,v1,v2,v3 # outmask
le?vxor v3,v3,v4
lvx v1,0,$out # outhead
vperm v0,v0,v0,v3 # rotate [and byte swap in LE]
vsel v1,v1,v0,v2
lvx v4,$idx,$out
stvx v1,0,$out
vsel v0,v0,v4,v2
stvx v0,$idx,$out
mtspr 256,$vrsave
blr
.long 0
.byte 0,12,0x14,0,0,0,3,0
.long 0
.size .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
___
}
&gen_block("en");
&gen_block("de");
}}}
my $consts=1;
foreach(split("\n",$code)) {
s/\`([^\`]*)\`/eval($1)/geo;
# constants table endian-specific conversion
if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
my $conv=$3;
my @bytes=();
# convert to endian-agnostic format
if ($1 eq "long") {
foreach (split(/,\s*/,$2)) {
my $l = /^0/?oct:int;
push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
}
} else {
@bytes = map(/^0/?oct:int,split(/,\s*/,$2));
}
# little-endian conversion
if ($flavour =~ /le$/o) {
SWITCH: for($conv) {
/\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
/\?rev/ && do { @bytes=reverse(@bytes); last; };
}
}
#emit
print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
next;
}
$consts=0 if (m/Lconsts:/o); # end of table
# instructions prefixed with '?' are endian-specific and need
# to be adjusted accordingly...
if ($flavour =~ /le$/o) { # little-endian
s/le\?//o or
s/be\?/#be#/o or
s/\?lvsr/lvsl/o or
s/\?lvsl/lvsr/o or
s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
} else { # big-endian
s/le\?/#le#/o or
s/be\?//o or
s/\?([a-z]+)/$1/o;
}
print $_,"\n";
}
close STDOUT;


@@ -0,0 +1,370 @@
#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
# This code is taken from the OpenSSL project but the author (Andy Polyakov)
# has relicensed it under the GPLv2. Therefore this program is free software;
# you can redistribute it and/or modify it under the terms of the GNU General
# Public License version 2 as published by the Free Software Foundation.
#
# The original headers, including the original license headers, are
# included below for completeness.
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see https://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for PowerISA v2.07.
#
# July 2014
#
# Accurate performance measurements are problematic, because it's
# always virtualized setup with possibly throttled processor.
# Relative comparison is therefore more informative. This initial
# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x
# faster than "4-bit" integer-only compiler-generated 64-bit code.
# "Initial version" means that there is room for futher improvement.
$flavour=shift;
$output =shift;
if ($flavour =~ /64/) {
$SIZE_T=8;
$LRSAVE=2*$SIZE_T;
$STU="stdu";
$POP="ld";
$PUSH="std";
} elsif ($flavour =~ /32/) {
$SIZE_T=4;
$LRSAVE=$SIZE_T;
$STU="stwu";
$POP="lwz";
$PUSH="stw";
} else { die "nonsense $flavour"; }
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";
my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6)); # argument block
my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
my ($Xl1,$Xm1,$Xh1,$IN1,$H2,$H2h,$H2l)=map("v$_",(13..19));
my $vrsave="r12";
my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
$code=<<___;
.machine "any"
.text
.globl .gcm_init_p8
lis r0,0xfff0
li r8,0x10
mfspr $vrsave,256
li r9,0x20
mtspr 256,r0
li r10,0x30
lvx_u $H,0,r4 # load H
le?xor r7,r7,r7
le?addi r7,r7,0x8 # need a vperm start with 08
le?lvsr 5,0,r7
le?vspltisb 6,0x0f
le?vxor 5,5,6 # set a b-endian mask
le?vperm $H,$H,$H,5
vspltisb $xC2,-16 # 0xf0
vspltisb $t0,1 # one
vaddubm $xC2,$xC2,$xC2 # 0xe0
vxor $zero,$zero,$zero
vor $xC2,$xC2,$t0 # 0xe1
vsldoi $xC2,$xC2,$zero,15 # 0xe1...
vsldoi $t1,$zero,$t0,1 # ...1
vaddubm $xC2,$xC2,$xC2 # 0xc2...
vspltisb $t2,7
vor $xC2,$xC2,$t1 # 0xc2....01
vspltb $t1,$H,0 # most significant byte
vsl $H,$H,$t0 # H<<=1
vsrab $t1,$t1,$t2 # broadcast carry bit
vand $t1,$t1,$xC2
vxor $H,$H,$t1 # twisted H
vsldoi $H,$H,$H,8 # twist even more ...
vsldoi $xC2,$zero,$xC2,8 # 0xc2.0
vsldoi $Hl,$zero,$H,8 # ... and split
vsldoi $Hh,$H,$zero,8
stvx_u $xC2,0,r3 # save pre-computed table
stvx_u $Hl,r8,r3
stvx_u $H, r9,r3
stvx_u $Hh,r10,r3
mtspr 256,$vrsave
blr
.long 0
.byte 0,12,0x14,0,0,0,2,0
.long 0
.size .gcm_init_p8,.-.gcm_init_p8
.globl .gcm_init_htable
lis r0,0xfff0
li r8,0x10
mfspr $vrsave,256
li r9,0x20
mtspr 256,r0
li r10,0x30
lvx_u $H,0,r4 # load H
vspltisb $xC2,-16 # 0xf0
vspltisb $t0,1 # one
vaddubm $xC2,$xC2,$xC2 # 0xe0
vxor $zero,$zero,$zero
vor $xC2,$xC2,$t0 # 0xe1
vsldoi $xC2,$xC2,$zero,15 # 0xe1...
vsldoi $t1,$zero,$t0,1 # ...1
vaddubm $xC2,$xC2,$xC2 # 0xc2...
vspltisb $t2,7
vor $xC2,$xC2,$t1 # 0xc2....01
vspltb $t1,$H,0 # most significant byte
vsl $H,$H,$t0 # H<<=1
vsrab $t1,$t1,$t2 # broadcast carry bit
vand $t1,$t1,$xC2
vxor $IN,$H,$t1 # twisted H
vsldoi $H,$IN,$IN,8 # twist even more ...
vsldoi $xC2,$zero,$xC2,8 # 0xc2.0
vsldoi $Hl,$zero,$H,8 # ... and split
vsldoi $Hh,$H,$zero,8
stvx_u $xC2,0,r3 # save pre-computed table
stvx_u $Hl,r8,r3
li r8,0x40
stvx_u $H, r9,r3
li r9,0x50
stvx_u $Hh,r10,r3
li r10,0x60
vpmsumd $Xl,$IN,$Hl # H.lo·H.lo
vpmsumd $Xm,$IN,$H # H.hi·H.lo+H.lo·H.hi
vpmsumd $Xh,$IN,$Hh # H.hi·H.hi
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
vsldoi $t0,$Xm,$zero,8
vsldoi $t1,$zero,$Xm,8
vxor $Xl,$Xl,$t0
vxor $Xh,$Xh,$t1
vsldoi $Xl,$Xl,$Xl,8
vxor $Xl,$Xl,$t2
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
vpmsumd $Xl,$Xl,$xC2
vxor $t1,$t1,$Xh
vxor $IN1,$Xl,$t1
vsldoi $H2,$IN1,$IN1,8
vsldoi $H2l,$zero,$H2,8
vsldoi $H2h,$H2,$zero,8
stvx_u $H2l,r8,r3 # save H^2
li r8,0x70
stvx_u $H2,r9,r3
li r9,0x80
stvx_u $H2h,r10,r3
li r10,0x90
vpmsumd $Xl,$IN,$H2l # H.lo·H^2.lo
vpmsumd $Xl1,$IN1,$H2l # H^2.lo·H^2.lo
vpmsumd $Xm,$IN,$H2 # H.hi·H^2.lo+H.lo·H^2.hi
vpmsumd $Xm1,$IN1,$H2 # H^2.hi·H^2.lo+H^2.lo·H^2.hi
vpmsumd $Xh,$IN,$H2h # H.hi·H^2.hi
vpmsumd $Xh1,$IN1,$H2h # H^2.hi·H^2.hi
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
vpmsumd $t6,$Xl1,$xC2 # 1st reduction phase
vsldoi $t0,$Xm,$zero,8
vsldoi $t1,$zero,$Xm,8
vsldoi $t4,$Xm1,$zero,8
vsldoi $t5,$zero,$Xm1,8
vxor $Xl,$Xl,$t0
vxor $Xh,$Xh,$t1
vxor $Xl1,$Xl1,$t4
vxor $Xh1,$Xh1,$t5
vsldoi $Xl,$Xl,$Xl,8
vsldoi $Xl1,$Xl1,$Xl1,8
vxor $Xl,$Xl,$t2
vxor $Xl1,$Xl1,$t6
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
vsldoi $t5,$Xl1,$Xl1,8 # 2nd reduction phase
vpmsumd $Xl,$Xl,$xC2
vpmsumd $Xl1,$Xl1,$xC2
vxor $t1,$t1,$Xh
vxor $t5,$t5,$Xh1
vxor $Xl,$Xl,$t1
vxor $Xl1,$Xl1,$t5
vsldoi $H,$Xl,$Xl,8
vsldoi $H2,$Xl1,$Xl1,8
vsldoi $Hl,$zero,$H,8
vsldoi $Hh,$H,$zero,8
vsldoi $H2l,$zero,$H2,8
vsldoi $H2h,$H2,$zero,8
stvx_u $Hl,r8,r3 # save H^3
li r8,0xa0
stvx_u $H,r9,r3
li r9,0xb0
stvx_u $Hh,r10,r3
li r10,0xc0
stvx_u $H2l,r8,r3 # save H^4
stvx_u $H2,r9,r3
stvx_u $H2h,r10,r3
mtspr 256,$vrsave
blr
.long 0
.byte 0,12,0x14,0,0,0,2,0
.long 0
.size .gcm_init_htable,.-.gcm_init_htable
.globl .gcm_gmult_p8
lis r0,0xfff8
li r8,0x10
mfspr $vrsave,256
li r9,0x20
mtspr 256,r0
li r10,0x30
lvx_u $IN,0,$Xip # load Xi
lvx_u $Hl,r8,$Htbl # load pre-computed table
le?lvsl $lemask,r0,r0
lvx_u $H, r9,$Htbl
le?vspltisb $t0,0x07
lvx_u $Hh,r10,$Htbl
le?vxor $lemask,$lemask,$t0
lvx_u $xC2,0,$Htbl
le?vperm $IN,$IN,$IN,$lemask
vxor $zero,$zero,$zero
vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
vpmsumd $t2,$Xl,$xC2 # 1st phase
vsldoi $t0,$Xm,$zero,8
vsldoi $t1,$zero,$Xm,8
vxor $Xl,$Xl,$t0
vxor $Xh,$Xh,$t1
vsldoi $Xl,$Xl,$Xl,8
vxor $Xl,$Xl,$t2
vsldoi $t1,$Xl,$Xl,8 # 2nd phase
vpmsumd $Xl,$Xl,$xC2
vxor $t1,$t1,$Xh
vxor $Xl,$Xl,$t1
le?vperm $Xl,$Xl,$Xl,$lemask
stvx_u $Xl,0,$Xip # write out Xi
mtspr 256,$vrsave
blr
.long 0
.byte 0,12,0x14,0,0,0,2,0
.long 0
.size .gcm_gmult_p8,.-.gcm_gmult_p8
.globl .gcm_ghash_p8
lis r0,0xfff8
li r8,0x10
mfspr $vrsave,256
li r9,0x20
mtspr 256,r0
li r10,0x30
lvx_u $Xl,0,$Xip # load Xi
lvx_u $Hl,r8,$Htbl # load pre-computed table
le?lvsl $lemask,r0,r0
lvx_u $H, r9,$Htbl
le?vspltisb $t0,0x07
lvx_u $Hh,r10,$Htbl
le?vxor $lemask,$lemask,$t0
lvx_u $xC2,0,$Htbl
le?vperm $Xl,$Xl,$Xl,$lemask
vxor $zero,$zero,$zero
lvx_u $IN,0,$inp
addi $inp,$inp,16
subi $len,$len,16
le?vperm $IN,$IN,$IN,$lemask
vxor $IN,$IN,$Xl
b Loop
.align 5
Loop:
subic $len,$len,16
vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
subfe. r0,r0,r0 # borrow?-1:0
vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
and r0,r0,$len
vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
add $inp,$inp,r0
vpmsumd $t2,$Xl,$xC2 # 1st phase
vsldoi $t0,$Xm,$zero,8
vsldoi $t1,$zero,$Xm,8
vxor $Xl,$Xl,$t0
vxor $Xh,$Xh,$t1
vsldoi $Xl,$Xl,$Xl,8
vxor $Xl,$Xl,$t2
lvx_u $IN,0,$inp
addi $inp,$inp,16
vsldoi $t1,$Xl,$Xl,8 # 2nd phase
vpmsumd $Xl,$Xl,$xC2
le?vperm $IN,$IN,$IN,$lemask
vxor $t1,$t1,$Xh
vxor $IN,$IN,$t1
vxor $IN,$IN,$Xl
beq Loop # did $len-=16 borrow?
vxor $Xl,$Xl,$t1
le?vperm $Xl,$Xl,$Xl,$lemask
stvx_u $Xl,0,$Xip # write out Xi
mtspr 256,$vrsave
blr
.long 0
.byte 0,12,0x14,0,0,0,4,0
.long 0
.size .gcm_ghash_p8,.-.gcm_ghash_p8
.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
___
foreach (split("\n",$code)) {
if ($flavour =~ /le$/o) { # little-endian
s/le\?//o or
s/be\?/#be#/o;
} else {
s/le\?/#le#/o or
s/be\?//o;
}
print $_,"\n";
}
close STDOUT; # enforce flush


@@ -0,0 +1,229 @@
#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
# PowerPC assembler distiller by <appro>.
my $flavour = shift;
my $output = shift;
open STDOUT,">$output" || die "can't open $output: $!";
my %GLOBALS;
my $dotinlocallabels=($flavour=~/linux/)?1:0;
################################################################
# directives which need special treatment on different platforms
################################################################
my $globl = sub {
my $junk = shift;
my $name = shift;
my $global = \$GLOBALS{$name};
my $ret;
$name =~ s|^[\.\_]||;
SWITCH: for ($flavour) {
/aix/ && do { $name = ".$name";
last;
};
/osx/ && do { $name = "_$name";
last;
};
/linux/
&& do { $ret = "_GLOBAL($name)";
last;
};
}
$ret = ".globl $name\nalign 5\n$name:" if (!$ret);
$$global = $name;
$ret;
};
my $text = sub {
my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text";
$ret = ".abiversion 2\n".$ret if ($flavour =~ /linux.*64le/);
$ret;
};
my $machine = sub {
my $junk = shift;
my $arch = shift;
if ($flavour =~ /osx/)
{ $arch =~ s/\"//g;
$arch = ($flavour=~/64/) ? "ppc970-64" : "ppc970" if ($arch eq "any");
}
".machine $arch";
};
my $size = sub {
if ($flavour =~ /linux/)
{ shift;
my $name = shift; $name =~ s|^[\.\_]||;
my $ret = ".size $name,.-".($flavour=~/64$/?".":"").$name;
$ret .= "\n.size .$name,.-.$name" if ($flavour=~/64$/);
$ret;
}
else
{ ""; }
};
my $asciz = sub {
shift;
my $line = join(",",@_);
if ($line =~ /^"(.*)"$/)
{ ".byte " . join(",",unpack("C*",$1),0) . "\n.align 2"; }
else
{ ""; }
};
my $quad = sub {
shift;
my @ret;
my ($hi,$lo);
for (@_) {
if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io)
{ $hi=$1?"0x$1":"0"; $lo="0x$2"; }
elsif (/^([0-9]+)$/o)
{ $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl
else
{ $hi=undef; $lo=$_; }
if (defined($hi))
{ push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); }
else
{ push(@ret,".quad $lo"); }
}
join("\n",@ret);
};
################################################################
# simplified mnemonics not handled by at least one assembler
################################################################
my $cmplw = sub {
my $f = shift;
my $cr = 0; $cr = shift if ($#_>1);
# Some out-of-date 32-bit GNU assembler just can't handle cmplw...
($flavour =~ /linux.*32/) ?
" .long ".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 :
" cmplw ".join(',',$cr,@_);
};
my $bdnz = sub {
my $f = shift;
my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint
" bc $bo,0,".shift;
} if ($flavour!~/linux/);
my $bltlr = sub {
my $f = shift;
my $bo = $f=~/\-/ ? 12+2 : 12; # optional "not to be taken" hint
($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
" .long ".sprintf "0x%x",19<<26|$bo<<21|16<<1 :
" bclr $bo,0";
};
my $bnelr = sub {
my $f = shift;
my $bo = $f=~/\-/ ? 4+2 : 4; # optional "not to be taken" hint
($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
" .long ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 :
" bclr $bo,2";
};
my $beqlr = sub {
my $f = shift;
my $bo = $f=~/-/ ? 12+2 : 12; # optional "not to be taken" hint
($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
" .long ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 :
" bclr $bo,2";
};
# GNU assembler can't handle extrdi rA,rS,16,48, or when sum of last two
# arguments is 64, with "operand out of range" error.
my $extrdi = sub {
my ($f,$ra,$rs,$n,$b) = @_;
$b = ($b+$n)&63; $n = 64-$n;
" rldicl $ra,$rs,$b,$n";
};
my $vmr = sub {
my ($f,$vx,$vy) = @_;
" vor $vx,$vy,$vy";
};
# Some ABIs specify vrsave, special-purpose register #256, as reserved
# for system use.
my $no_vrsave = ($flavour =~ /linux-ppc64le/);
my $mtspr = sub {
my ($f,$idx,$ra) = @_;
if ($idx == 256 && $no_vrsave) {
" or $ra,$ra,$ra";
} else {
" mtspr $idx,$ra";
}
};
my $mfspr = sub {
my ($f,$rd,$idx) = @_;
if ($idx == 256 && $no_vrsave) {
" li $rd,-1";
} else {
" mfspr $rd,$idx";
}
};
# PowerISA 2.06 stuff
sub vsxmem_op {
my ($f, $vrt, $ra, $rb, $op) = @_;
" .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
}
# made-up unaligned memory reference AltiVec/VMX instructions
my $lvx_u = sub { vsxmem_op(@_, 844); }; # lxvd2x
my $stvx_u = sub { vsxmem_op(@_, 972); }; # stxvd2x
my $lvdx_u = sub { vsxmem_op(@_, 588); }; # lxsdx
my $stvdx_u = sub { vsxmem_op(@_, 716); }; # stxsdx
my $lvx_4w = sub { vsxmem_op(@_, 780); }; # lxvw4x
my $stvx_4w = sub { vsxmem_op(@_, 908); }; # stxvw4x
# PowerISA 2.07 stuff
sub vcrypto_op {
my ($f, $vrt, $vra, $vrb, $op) = @_;
" .long ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op;
}
my $vcipher = sub { vcrypto_op(@_, 1288); };
my $vcipherlast = sub { vcrypto_op(@_, 1289); };
my $vncipher = sub { vcrypto_op(@_, 1352); };
my $vncipherlast= sub { vcrypto_op(@_, 1353); };
my $vsbox = sub { vcrypto_op(@_, 0, 1480); };
my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); };
my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); };
my $vpmsumb = sub { vcrypto_op(@_, 1032); };
my $vpmsumd = sub { vcrypto_op(@_, 1224); };
my $vpmsubh = sub { vcrypto_op(@_, 1096); };
my $vpmsumw = sub { vcrypto_op(@_, 1160); };
my $vaddudm = sub { vcrypto_op(@_, 192); };
my $vadduqm = sub { vcrypto_op(@_, 256); };
my $mtsle = sub {
my ($f, $arg) = @_;
" .long ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2);
};
print "#include <asm/ppc_asm.h>\n" if $flavour =~ /linux/;
while($line=<>) {
$line =~ s|[#!;].*$||; # get rid of asm-style comments...
$line =~ s|/\*.*\*/||; # ... and C-style comments...
$line =~ s|^\s+||; # ... and skip white spaces in beginning...
$line =~ s|\s+$||; # ... and at the end
{
$line =~ s|\b\.L(\w+)|L$1|g; # common denominator for Locallabel
$line =~ s|\bL(\w+)|\.L$1|g if ($dotinlocallabels);
}
{
$line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||;
my $c = $1; $c = "\t" if ($c eq "");
my $mnemonic = $2;
my $f = $3;
my $opcode = eval("\$$mnemonic");
$line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/);
if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); }
elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; }
}
print $line if ($line);
print "\n";
}
close STDOUT;


@@ -22,6 +22,7 @@
*/
#define PPC_MODULE_FEATURE_VEC_CRYPTO (32 + ilog2(PPC_FEATURE2_VEC_CRYPTO))
#define PPC_MODULE_FEATURE_P10 (32 + ilog2(PPC_FEATURE2_ARCH_3_1))
#define cpu_feature(x) (x)


@@ -201,8 +201,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_init)
movdqa KEY, STATE4
/* load the constants: */
movdqa .Laegis128_const_0, STATE2
movdqa .Laegis128_const_1, STATE1
movdqa .Laegis128_const_0(%rip), STATE2
movdqa .Laegis128_const_1(%rip), STATE1
pxor STATE2, STATE3
pxor STATE1, STATE4
@@ -682,7 +682,7 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
punpcklbw T0, T0
punpcklbw T0, T0
punpcklbw T0, T0
movdqa .Laegis128_counter, T1
movdqa .Laegis128_counter(%rip), T1
pcmpgtb T1, T0
pand T0, MSG


@@ -288,53 +288,53 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
# Encrypt/Decrypt first few blocks
and $(3<<4), %r12
jz _initial_num_blocks_is_0_\@
jz .L_initial_num_blocks_is_0_\@
cmp $(2<<4), %r12
jb _initial_num_blocks_is_1_\@
je _initial_num_blocks_is_2_\@
_initial_num_blocks_is_3_\@:
jb .L_initial_num_blocks_is_1_\@
je .L_initial_num_blocks_is_2_\@
.L_initial_num_blocks_is_3_\@:
INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, \operation
sub $48, %r13
jmp _initial_blocks_\@
_initial_num_blocks_is_2_\@:
jmp .L_initial_blocks_\@
.L_initial_num_blocks_is_2_\@:
INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, \operation
sub $32, %r13
jmp _initial_blocks_\@
_initial_num_blocks_is_1_\@:
jmp .L_initial_blocks_\@
.L_initial_num_blocks_is_1_\@:
INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, \operation
sub $16, %r13
jmp _initial_blocks_\@
_initial_num_blocks_is_0_\@:
jmp .L_initial_blocks_\@
.L_initial_num_blocks_is_0_\@:
INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, \operation
_initial_blocks_\@:
.L_initial_blocks_\@:
# Main loop - Encrypt/Decrypt remaining blocks
test %r13, %r13
je _zero_cipher_left_\@
je .L_zero_cipher_left_\@
sub $64, %r13
je _four_cipher_left_\@
_crypt_by_4_\@:
je .L_four_cipher_left_\@
.L_crypt_by_4_\@:
GHASH_4_ENCRYPT_4_PARALLEL_\operation %xmm9, %xmm10, %xmm11, %xmm12, \
%xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, \
%xmm7, %xmm8, enc
add $64, %r11
sub $64, %r13
jne _crypt_by_4_\@
_four_cipher_left_\@:
jne .L_crypt_by_4_\@
.L_four_cipher_left_\@:
GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
_zero_cipher_left_\@:
.L_zero_cipher_left_\@:
movdqu %xmm8, AadHash(%arg2)
movdqu %xmm0, CurCount(%arg2)
mov %arg5, %r13
and $15, %r13 # %r13 = arg5 (mod 16)
je _multiple_of_16_bytes_\@
je .L_multiple_of_16_bytes_\@
mov %r13, PBlockLen(%arg2)
@@ -348,14 +348,14 @@ _zero_cipher_left_\@:
movdqu %xmm0, PBlockEncKey(%arg2)
cmp $16, %arg5
jge _large_enough_update_\@
jge .L_large_enough_update_\@
lea (%arg4,%r11,1), %r10
mov %r13, %r12
READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
jmp _data_read_\@
jmp .L_data_read_\@
_large_enough_update_\@:
.L_large_enough_update_\@:
sub $16, %r11
add %r13, %r11
@@ -374,7 +374,7 @@ _large_enough_update_\@:
# shift right 16-r13 bytes
pshufb %xmm2, %xmm1
_data_read_\@:
.L_data_read_\@:
lea ALL_F+16(%rip), %r12
sub %r13, %r12
@@ -409,19 +409,19 @@ _data_read_\@:
# Output %r13 bytes
movq %xmm0, %rax
cmp $8, %r13
jle _less_than_8_bytes_left_\@
jle .L_less_than_8_bytes_left_\@
mov %rax, (%arg3 , %r11, 1)
add $8, %r11
psrldq $8, %xmm0
movq %xmm0, %rax
sub $8, %r13
_less_than_8_bytes_left_\@:
.L_less_than_8_bytes_left_\@:
mov %al, (%arg3, %r11, 1)
add $1, %r11
shr $8, %rax
sub $1, %r13
jne _less_than_8_bytes_left_\@
_multiple_of_16_bytes_\@:
jne .L_less_than_8_bytes_left_\@
.L_multiple_of_16_bytes_\@:
.endm
# GCM_COMPLETE Finishes update of tag of last partial block
@@ -434,11 +434,11 @@ _multiple_of_16_bytes_\@:
mov PBlockLen(%arg2), %r12
test %r12, %r12
je _partial_done\@
je .L_partial_done\@
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
_partial_done\@:
.L_partial_done\@:
mov AadLen(%arg2), %r12 # %r12 = aadLen (number of bytes)
shl $3, %r12 # convert into number of bits
movd %r12d, %xmm15 # len(A) in %xmm15
@@ -457,44 +457,44 @@ _partial_done\@:
movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
pxor %xmm8, %xmm0
_return_T_\@:
.L_return_T_\@:
mov \AUTHTAG, %r10 # %r10 = authTag
mov \AUTHTAGLEN, %r11 # %r11 = auth_tag_len
cmp $16, %r11
je _T_16_\@
je .L_T_16_\@
cmp $8, %r11
jl _T_4_\@
_T_8_\@:
jl .L_T_4_\@
.L_T_8_\@:
movq %xmm0, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
psrldq $8, %xmm0
test %r11, %r11
je _return_T_done_\@
_T_4_\@:
je .L_return_T_done_\@
.L_T_4_\@:
movd %xmm0, %eax
mov %eax, (%r10)
add $4, %r10
sub $4, %r11
psrldq $4, %xmm0
test %r11, %r11
je _return_T_done_\@
_T_123_\@:
je .L_return_T_done_\@
.L_T_123_\@:
movd %xmm0, %eax
cmp $2, %r11
jl _T_1_\@
jl .L_T_1_\@
mov %ax, (%r10)
cmp $2, %r11
je _return_T_done_\@
je .L_return_T_done_\@
add $2, %r10
sar $16, %eax
_T_1_\@:
.L_T_1_\@:
mov %al, (%r10)
jmp _return_T_done_\@
_T_16_\@:
jmp .L_return_T_done_\@
.L_T_16_\@:
movdqu %xmm0, (%r10)
_return_T_done_\@:
.L_return_T_done_\@:
.endm
#ifdef __x86_64__
@@ -563,30 +563,30 @@ _return_T_done_\@:
# Clobbers %rax, DLEN and XMM1
.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
cmp $8, \DLEN
jl _read_lt8_\@
jl .L_read_lt8_\@
mov (\DPTR), %rax
movq %rax, \XMMDst
sub $8, \DLEN
jz _done_read_partial_block_\@
jz .L_done_read_partial_block_\@
xor %eax, %eax
_read_next_byte_\@:
.L_read_next_byte_\@:
shl $8, %rax
mov 7(\DPTR, \DLEN, 1), %al
dec \DLEN
jnz _read_next_byte_\@
jnz .L_read_next_byte_\@
movq %rax, \XMM1
pslldq $8, \XMM1
por \XMM1, \XMMDst
jmp _done_read_partial_block_\@
_read_lt8_\@:
jmp .L_done_read_partial_block_\@
.L_read_lt8_\@:
xor %eax, %eax
_read_next_byte_lt8_\@:
.L_read_next_byte_lt8_\@:
shl $8, %rax
mov -1(\DPTR, \DLEN, 1), %al
dec \DLEN
jnz _read_next_byte_lt8_\@
jnz .L_read_next_byte_lt8_\@
movq %rax, \XMMDst
_done_read_partial_block_\@:
.L_done_read_partial_block_\@:
.endm
# CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
@@ -600,8 +600,8 @@ _done_read_partial_block_\@:
pxor \TMP6, \TMP6
cmp $16, %r11
jl _get_AAD_rest\@
_get_AAD_blocks\@:
jl .L_get_AAD_rest\@
.L_get_AAD_blocks\@:
movdqu (%r10), \TMP7
pshufb %xmm14, \TMP7 # byte-reflect the AAD data
pxor \TMP7, \TMP6
@@ -609,14 +609,14 @@ _get_AAD_blocks\@:
add $16, %r10
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\@
jge .L_get_AAD_blocks\@
movdqu \TMP6, \TMP7
/* read the last <16B of AAD */
_get_AAD_rest\@:
.L_get_AAD_rest\@:
test %r11, %r11
je _get_AAD_done\@
je .L_get_AAD_done\@
READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
pshufb %xmm14, \TMP7 # byte-reflect the AAD data
@@ -624,7 +624,7 @@ _get_AAD_rest\@:
GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
movdqu \TMP7, \TMP6
_get_AAD_done\@:
.L_get_AAD_done\@:
movdqu \TMP6, AadHash(%arg2)
.endm
@@ -637,21 +637,21 @@ _get_AAD_done\@:
AAD_HASH operation
mov PBlockLen(%arg2), %r13
test %r13, %r13
je _partial_block_done_\@ # Leave Macro if no partial blocks
je .L_partial_block_done_\@ # Leave Macro if no partial blocks
# Read in input data without over reading
cmp $16, \PLAIN_CYPH_LEN
jl _fewer_than_16_bytes_\@
jl .L_fewer_than_16_bytes_\@
movups (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
jmp _data_read_\@
jmp .L_data_read_\@
_fewer_than_16_bytes_\@:
.L_fewer_than_16_bytes_\@:
lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
mov \PLAIN_CYPH_LEN, %r12
READ_PARTIAL_BLOCK %r10 %r12 %xmm0 %xmm1
mov PBlockLen(%arg2), %r13
_data_read_\@: # Finished reading in data
.L_data_read_\@: # Finished reading in data
movdqu PBlockEncKey(%arg2), %xmm9
movdqu HashKey(%arg2), %xmm13
@@ -674,9 +674,9 @@ _data_read_\@: # Finished reading in data
sub $16, %r10
# Determine if the partial block is not being filled and
# shift the mask accordingly
jge _no_extra_mask_1_\@
jge .L_no_extra_mask_1_\@
sub %r10, %r12
_no_extra_mask_1_\@:
.L_no_extra_mask_1_\@:
movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
# get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -689,17 +689,17 @@ _no_extra_mask_1_\@:
pxor %xmm3, \AAD_HASH
test %r10, %r10
jl _partial_incomplete_1_\@
jl .L_partial_incomplete_1_\@
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
xor %eax, %eax
mov %rax, PBlockLen(%arg2)
jmp _dec_done_\@
_partial_incomplete_1_\@:
jmp .L_dec_done_\@
.L_partial_incomplete_1_\@:
add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
_dec_done_\@:
.L_dec_done_\@:
movdqu \AAD_HASH, AadHash(%arg2)
.else
pxor %xmm1, %xmm9 # Plaintext XOR E(K, Yn)
@@ -710,9 +710,9 @@ _dec_done_\@:
sub $16, %r10
# Determine if the partial block is not being filled and
# shift the mask accordingly
jge _no_extra_mask_2_\@
jge .L_no_extra_mask_2_\@
sub %r10, %r12
_no_extra_mask_2_\@:
.L_no_extra_mask_2_\@:
movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
# get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -724,17 +724,17 @@ _no_extra_mask_2_\@:
pxor %xmm9, \AAD_HASH
test %r10, %r10
jl _partial_incomplete_2_\@
jl .L_partial_incomplete_2_\@
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
xor %eax, %eax
mov %rax, PBlockLen(%arg2)
jmp _encode_done_\@
_partial_incomplete_2_\@:
jmp .L_encode_done_\@
.L_partial_incomplete_2_\@:
add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
_encode_done_\@:
.L_encode_done_\@:
movdqu \AAD_HASH, AadHash(%arg2)
movdqa SHUF_MASK(%rip), %xmm10
@@ -744,32 +744,32 @@ _encode_done_\@:
.endif
# output encrypted Bytes
test %r10, %r10
jl _partial_fill_\@
jl .L_partial_fill_\@
mov %r13, %r12
mov $16, %r13
# Set r13 to be the number of bytes to write out
sub %r12, %r13
jmp _count_set_\@
_partial_fill_\@:
jmp .L_count_set_\@
.L_partial_fill_\@:
mov \PLAIN_CYPH_LEN, %r13
_count_set_\@:
.L_count_set_\@:
movdqa %xmm9, %xmm0
movq %xmm0, %rax
cmp $8, %r13
jle _less_than_8_bytes_left_\@
jle .L_less_than_8_bytes_left_\@
mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $8, \DATA_OFFSET
psrldq $8, %xmm0
movq %xmm0, %rax
sub $8, %r13
_less_than_8_bytes_left_\@:
.L_less_than_8_bytes_left_\@:
movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $1, \DATA_OFFSET
shr $8, %rax
sub $1, %r13
jne _less_than_8_bytes_left_\@
_partial_block_done_\@:
jne .L_less_than_8_bytes_left_\@
.L_partial_block_done_\@:
.endm # PARTIAL_BLOCK
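
PARTIAL_BLOCK's job is bookkeeping for streaming updates: up to 15 bytes of an unfinished block, plus the keystream block they were encrypted with (PBlockEncKey), survive between calls, and the next call completes the block before resuming on 16-byte boundaries. A hedged sketch of that state machine, with illustrative field names:

#include <stddef.h>
#include <stdint.h>

struct gcm_partial {
	uint8_t keystream[16];   /* E(K, Yn) saved for the open block */
	size_t  used;            /* bytes of the open block already consumed */
};

/* Finish the block left open by the previous call; returns how many
 * input bytes were consumed, so the caller resumes at in + ret. */
static size_t gcm_finish_partial(struct gcm_partial *s, const uint8_t *in,
				 uint8_t *out, size_t len)
{
	size_t take = 16 - s->used;

	if (s->used == 0)
		return 0;            /* no partial block pending */
	if (take > len)
		take = len;          /* still not enough data to close the block */
	for (size_t i = 0; i < take; i++)
		out[i] = in[i] ^ s->keystream[s->used + i];
	s->used = (s->used + take) % 16;
	return take;
}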
/*
@@ -813,14 +813,14 @@ _partial_block_done_\@:
shr $2,%eax # 128->4, 192->6, 256->8
add $5,%eax # 128->9, 192->11, 256->13
aes_loop_initial_\@:
.Laes_loop_initial_\@:
MOVADQ (%r10),\TMP1
.irpc index, \i_seq
aesenc \TMP1, %xmm\index
.endr
add $16,%r10
sub $1,%eax
jnz aes_loop_initial_\@
jnz .Laes_loop_initial_\@
MOVADQ (%r10), \TMP1
.irpc index, \i_seq
@@ -861,7 +861,7 @@ aes_loop_initial_\@:
GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
.endif
cmp $64, %r13
jl _initial_blocks_done\@
jl .L_initial_blocks_done\@
# no need for precomputed values
/*
*
@@ -908,18 +908,18 @@ aes_loop_initial_\@:
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
jz aes_loop_pre_done\@
jz .Laes_loop_pre_done\@
aes_loop_pre_\@:
.Laes_loop_pre_\@:
MOVADQ (%r10),\TMP2
.irpc index, 1234
aesenc \TMP2, %xmm\index
.endr
add $16,%r10
sub $1,%eax
jnz aes_loop_pre_\@
jnz .Laes_loop_pre_\@
aes_loop_pre_done\@:
.Laes_loop_pre_done\@:
MOVADQ (%r10), \TMP2
aesenclast \TMP2, \XMM1
aesenclast \TMP2, \XMM2
@@ -963,7 +963,7 @@ aes_loop_pre_done\@:
pshufb %xmm14, \XMM3 # perform a 16 byte swap
pshufb %xmm14, \XMM4 # perform a 16 byte swap
_initial_blocks_done\@:
.L_initial_blocks_done\@:
.endm
@@ -1095,18 +1095,18 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
jz aes_loop_par_enc_done\@
jz .Laes_loop_par_enc_done\@
aes_loop_par_enc\@:
.Laes_loop_par_enc\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
aesenc \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
jnz aes_loop_par_enc\@
jnz .Laes_loop_par_enc\@
aes_loop_par_enc_done\@:
.Laes_loop_par_enc_done\@:
MOVADQ (%r10), \TMP3
aesenclast \TMP3, \XMM1 # Round 10
aesenclast \TMP3, \XMM2
@@ -1303,18 +1303,18 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
jz aes_loop_par_dec_done\@
jz .Laes_loop_par_dec_done\@
aes_loop_par_dec\@:
.Laes_loop_par_dec\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
aesenc \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
jnz aes_loop_par_dec\@
jnz .Laes_loop_par_dec\@
aes_loop_par_dec_done\@:
.Laes_loop_par_dec_done\@:
MOVADQ (%r10), \TMP3
aesenclast \TMP3, \XMM1 # last round
aesenclast \TMP3, \XMM2
@@ -2717,7 +2717,7 @@ SYM_FUNC_END(aesni_cts_cbc_dec)
* BSWAP_MASK == endian swapping mask
*/
SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSWAP_MASK
movaps .Lbswap_mask(%rip), BSWAP_MASK
movaps IV, CTR
pshufb BSWAP_MASK, CTR
mov $1, TCTR_LOW


@@ -154,30 +154,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000
.section .rodata
.align 16
.type aad_shift_arr, @object
.size aad_shift_arr, 272
aad_shift_arr:
.octa 0xffffffffffffffffffffffffffffffff
.octa 0xffffffffffffffffffffffffffffff0C
.octa 0xffffffffffffffffffffffffffff0D0C
.octa 0xffffffffffffffffffffffffff0E0D0C
.octa 0xffffffffffffffffffffffff0F0E0D0C
.octa 0xffffffffffffffffffffff0C0B0A0908
.octa 0xffffffffffffffffffff0D0C0B0A0908
.octa 0xffffffffffffffffff0E0D0C0B0A0908
.octa 0xffffffffffffffff0F0E0D0C0B0A0908
.octa 0xffffffffffffff0C0B0A090807060504
.octa 0xffffffffffff0D0C0B0A090807060504
.octa 0xffffffffff0E0D0C0B0A090807060504
.octa 0xffffffff0F0E0D0C0B0A090807060504
.octa 0xffffff0C0B0A09080706050403020100
.octa 0xffff0D0C0B0A09080706050403020100
.octa 0xff0E0D0C0B0A09080706050403020100
.octa 0x0F0E0D0C0B0A09080706050403020100
.text
@@ -302,68 +278,68 @@ VARIABLE_OFFSET = 16*8
mov %r13, %r12
shr $4, %r12
and $7, %r12
jz _initial_num_blocks_is_0\@
jz .L_initial_num_blocks_is_0\@
cmp $7, %r12
je _initial_num_blocks_is_7\@
je .L_initial_num_blocks_is_7\@
cmp $6, %r12
je _initial_num_blocks_is_6\@
je .L_initial_num_blocks_is_6\@
cmp $5, %r12
je _initial_num_blocks_is_5\@
je .L_initial_num_blocks_is_5\@
cmp $4, %r12
je _initial_num_blocks_is_4\@
je .L_initial_num_blocks_is_4\@
cmp $3, %r12
je _initial_num_blocks_is_3\@
je .L_initial_num_blocks_is_3\@
cmp $2, %r12
je _initial_num_blocks_is_2\@
je .L_initial_num_blocks_is_2\@
jmp _initial_num_blocks_is_1\@
jmp .L_initial_num_blocks_is_1\@
_initial_num_blocks_is_7\@:
.L_initial_num_blocks_is_7\@:
\INITIAL_BLOCKS \REP, 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*7, %r13
jmp _initial_blocks_encrypted\@
jmp .L_initial_blocks_encrypted\@
_initial_num_blocks_is_6\@:
.L_initial_num_blocks_is_6\@:
\INITIAL_BLOCKS \REP, 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*6, %r13
jmp _initial_blocks_encrypted\@
jmp .L_initial_blocks_encrypted\@
_initial_num_blocks_is_5\@:
.L_initial_num_blocks_is_5\@:
\INITIAL_BLOCKS \REP, 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*5, %r13
jmp _initial_blocks_encrypted\@
jmp .L_initial_blocks_encrypted\@
_initial_num_blocks_is_4\@:
.L_initial_num_blocks_is_4\@:
\INITIAL_BLOCKS \REP, 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*4, %r13
jmp _initial_blocks_encrypted\@
jmp .L_initial_blocks_encrypted\@
_initial_num_blocks_is_3\@:
.L_initial_num_blocks_is_3\@:
\INITIAL_BLOCKS \REP, 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*3, %r13
jmp _initial_blocks_encrypted\@
jmp .L_initial_blocks_encrypted\@
_initial_num_blocks_is_2\@:
.L_initial_num_blocks_is_2\@:
\INITIAL_BLOCKS \REP, 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*2, %r13
jmp _initial_blocks_encrypted\@
jmp .L_initial_blocks_encrypted\@
_initial_num_blocks_is_1\@:
.L_initial_num_blocks_is_1\@:
\INITIAL_BLOCKS \REP, 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
sub $16*1, %r13
jmp _initial_blocks_encrypted\@
jmp .L_initial_blocks_encrypted\@
_initial_num_blocks_is_0\@:
.L_initial_num_blocks_is_0\@:
\INITIAL_BLOCKS \REP, 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
_initial_blocks_encrypted\@:
.L_initial_blocks_encrypted\@:
test %r13, %r13
je _zero_cipher_left\@
je .L_zero_cipher_left\@
sub $128, %r13
je _eight_cipher_left\@
je .L_eight_cipher_left\@
@@ -373,9 +349,9 @@ _initial_blocks_encrypted\@:
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
_encrypt_by_8_new\@:
.L_encrypt_by_8_new\@:
cmp $(255-8), %r15d
jg _encrypt_by_8\@
jg .L_encrypt_by_8\@
@@ -383,30 +359,30 @@ _encrypt_by_8_new\@:
\GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC
add $128, %r11
sub $128, %r13
jne _encrypt_by_8_new\@
jne .L_encrypt_by_8_new\@
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
jmp _eight_cipher_left\@
jmp .L_eight_cipher_left\@
_encrypt_by_8\@:
.L_encrypt_by_8\@:
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
add $8, %r15b
\GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
add $128, %r11
sub $128, %r13
jne _encrypt_by_8_new\@
jne .L_encrypt_by_8_new\@
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
_eight_cipher_left\@:
.L_eight_cipher_left\@:
\GHASH_LAST_8 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8
_zero_cipher_left\@:
.L_zero_cipher_left\@:
vmovdqu %xmm14, AadHash(arg2)
vmovdqu %xmm9, CurCount(arg2)
@@ -414,7 +390,7 @@ _zero_cipher_left\@:
mov arg5, %r13
and $15, %r13 # r13 = (arg5 mod 16)
je _multiple_of_16_bytes\@
je .L_multiple_of_16_bytes\@
# handle the last <16 Byte block separately
@@ -428,7 +404,7 @@ _zero_cipher_left\@:
vmovdqu %xmm9, PBlockEncKey(arg2)
cmp $16, arg5
jge _large_enough_update\@
jge .L_large_enough_update\@
lea (arg4,%r11,1), %r10
mov %r13, %r12
@@ -440,9 +416,9 @@ _zero_cipher_left\@:
# able to shift 16-r13 bytes (r13 is the
# number of bytes in plaintext mod 16)
jmp _final_ghash_mul\@
jmp .L_final_ghash_mul\@
_large_enough_update\@:
.L_large_enough_update\@:
sub $16, %r11
add %r13, %r11
@@ -461,7 +437,7 @@ _large_enough_update\@:
# shift right 16-r13 bytes
vpshufb %xmm2, %xmm1, %xmm1
_final_ghash_mul\@:
.L_final_ghash_mul\@:
.if \ENC_DEC == DEC
vmovdqa %xmm1, %xmm2
vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
@@ -490,7 +466,7 @@ _final_ghash_mul\@:
# output r13 Bytes
vmovq %xmm9, %rax
cmp $8, %r13
jle _less_than_8_bytes_left\@
jle .L_less_than_8_bytes_left\@
mov %rax, (arg3 , %r11)
add $8, %r11
@@ -498,15 +474,15 @@ _final_ghash_mul\@:
vmovq %xmm9, %rax
sub $8, %r13
_less_than_8_bytes_left\@:
.L_less_than_8_bytes_left\@:
movb %al, (arg3 , %r11)
add $1, %r11
shr $8, %rax
sub $1, %r13
jne _less_than_8_bytes_left\@
jne .L_less_than_8_bytes_left\@
#############################
_multiple_of_16_bytes\@:
.L_multiple_of_16_bytes\@:
.endm
@@ -519,12 +495,12 @@ _multiple_of_16_bytes\@:
mov PBlockLen(arg2), %r12
test %r12, %r12
je _partial_done\@
je .L_partial_done\@
#GHASH computation for the last <16 Byte block
\GHASH_MUL %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
_partial_done\@:
.L_partial_done\@:
mov AadLen(arg2), %r12 # r12 = aadLen (number of bytes)
shl $3, %r12 # convert into number of bits
vmovd %r12d, %xmm15 # len(A) in xmm15
@@ -547,49 +523,49 @@ _partial_done\@:
_return_T\@:
.L_return_T\@:
mov \AUTH_TAG, %r10 # r10 = authTag
mov \AUTH_TAG_LEN, %r11 # r11 = auth_tag_len
cmp $16, %r11
je _T_16\@
je .L_T_16\@
cmp $8, %r11
jl _T_4\@
jl .L_T_4\@
_T_8\@:
.L_T_8\@:
vmovq %xmm9, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
vpsrldq $8, %xmm9, %xmm9
test %r11, %r11
je _return_T_done\@
_T_4\@:
je .L_return_T_done\@
.L_T_4\@:
vmovd %xmm9, %eax
mov %eax, (%r10)
add $4, %r10
sub $4, %r11
vpsrldq $4, %xmm9, %xmm9
test %r11, %r11
je _return_T_done\@
_T_123\@:
je .L_return_T_done\@
.L_T_123\@:
vmovd %xmm9, %eax
cmp $2, %r11
jl _T_1\@
jl .L_T_1\@
mov %ax, (%r10)
cmp $2, %r11
je _return_T_done\@
je .L_return_T_done\@
add $2, %r10
sar $16, %eax
_T_1\@:
.L_T_1\@:
mov %al, (%r10)
jmp _return_T_done\@
jmp .L_return_T_done\@
_T_16\@:
.L_T_16\@:
vmovdqu %xmm9, (%r10)
_return_T_done\@:
.L_return_T_done\@:
.endm
.macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
@@ -603,8 +579,8 @@ _return_T_done\@:
vpxor \T8, \T8, \T8
vpxor \T7, \T7, \T7
cmp $16, %r11
jl _get_AAD_rest8\@
_get_AAD_blocks\@:
jl .L_get_AAD_rest8\@
.L_get_AAD_blocks\@:
vmovdqu (%r10), \T7
vpshufb SHUF_MASK(%rip), \T7, \T7
vpxor \T7, \T8, \T8
@@ -613,29 +589,29 @@ _get_AAD_blocks\@:
sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\@
jge .L_get_AAD_blocks\@
vmovdqu \T8, \T7
test %r11, %r11
je _get_AAD_done\@
je .L_get_AAD_done\@
vpxor \T7, \T7, \T7
/* read the last <16B of AAD. since we have at least 4B of
data right after the AAD (the ICV, and maybe some CT), we can
read 4B/8B blocks safely, and then get rid of the extra stuff */
_get_AAD_rest8\@:
.L_get_AAD_rest8\@:
cmp $4, %r11
jle _get_AAD_rest4\@
jle .L_get_AAD_rest4\@
movq (%r10), \T1
add $8, %r10
sub $8, %r11
vpslldq $8, \T1, \T1
vpsrldq $8, \T7, \T7
vpxor \T1, \T7, \T7
jmp _get_AAD_rest8\@
_get_AAD_rest4\@:
jmp .L_get_AAD_rest8\@
.L_get_AAD_rest4\@:
test %r11, %r11
jle _get_AAD_rest0\@
jle .L_get_AAD_rest0\@
mov (%r10), %eax
movq %rax, \T1
add $4, %r10
@@ -643,20 +619,22 @@ _get_AAD_rest4\@:
vpslldq $12, \T1, \T1
vpsrldq $4, \T7, \T7
vpxor \T1, \T7, \T7
_get_AAD_rest0\@:
.L_get_AAD_rest0\@:
/* finalize: shift out the extra bytes we read, and align
left. since pslldq can only shift by an immediate, we use
vpshufb and an array of shuffle masks */
movq %r12, %r11
salq $4, %r11
vmovdqu aad_shift_arr(%r11), \T1
vpshufb \T1, \T7, \T7
_get_AAD_rest_final\@:
vpshufb and a pair of shuffle masks */
leaq ALL_F(%rip), %r11
subq %r12, %r11
vmovdqu 16(%r11), \T1
andq $~3, %r11
vpshufb (%r11), \T7, \T7
vpand \T1, \T7, \T7
.L_get_AAD_rest_final\@:
vpshufb SHUF_MASK(%rip), \T7, \T7
vpxor \T8, \T7, \T7
\GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
_get_AAD_done\@:
.L_get_AAD_done\@:
vmovdqu \T7, AadHash(arg2)
.endm
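
The replacement for aad_shift_arr above leans on a trick worth spelling out: pslldq only accepts an immediate shift count, so a byte shift by a runtime amount is done by loading a shuffle mask from a sliding window over a small table and applying pshufb (selector bytes with the top bit set produce zero). A hedged intrinsics sketch of the basic form, with an assumed table layout (build with SSSE3 enabled):

#include <stdint.h>
#include <tmmintrin.h>   /* SSSE3 _mm_shuffle_epi8 */

/* 16 "zero" selectors followed by the identity permutation. */
static const uint8_t shift_tab[32] = {
	0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
	0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
};

/* Shift v left by n bytes (0..16), zero-filling, with n known only at
 * runtime: the window position picks how many selectors are "zero". */
static __m128i byte_shl_var(__m128i v, unsigned n)
{
	__m128i mask = _mm_loadu_si128((const __m128i *)(shift_tab + 16 - n));
	return _mm_shuffle_epi8(v, mask);
}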
@@ -707,28 +685,28 @@ _get_AAD_done\@:
vpxor \XMMDst, \XMMDst, \XMMDst
cmp $8, \DLEN
jl _read_lt8_\@
jl .L_read_lt8_\@
mov (\DPTR), %rax
vpinsrq $0, %rax, \XMMDst, \XMMDst
sub $8, \DLEN
jz _done_read_partial_block_\@
jz .L_done_read_partial_block_\@
xor %eax, %eax
_read_next_byte_\@:
.L_read_next_byte_\@:
shl $8, %rax
mov 7(\DPTR, \DLEN, 1), %al
dec \DLEN
jnz _read_next_byte_\@
jnz .L_read_next_byte_\@
vpinsrq $1, %rax, \XMMDst, \XMMDst
jmp _done_read_partial_block_\@
_read_lt8_\@:
jmp .L_done_read_partial_block_\@
.L_read_lt8_\@:
xor %eax, %eax
_read_next_byte_lt8_\@:
.L_read_next_byte_lt8_\@:
shl $8, %rax
mov -1(\DPTR, \DLEN, 1), %al
dec \DLEN
jnz _read_next_byte_lt8_\@
jnz .L_read_next_byte_lt8_\@
vpinsrq $0, %rax, \XMMDst, \XMMDst
_done_read_partial_block_\@:
.L_done_read_partial_block_\@:
.endm
# PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks
@@ -740,21 +718,21 @@ _done_read_partial_block_\@:
AAD_HASH ENC_DEC
mov PBlockLen(arg2), %r13
test %r13, %r13
je _partial_block_done_\@ # Leave Macro if no partial blocks
je .L_partial_block_done_\@ # Leave Macro if no partial blocks
# Read in input data without over-reading
cmp $16, \PLAIN_CYPH_LEN
jl _fewer_than_16_bytes_\@
jl .L_fewer_than_16_bytes_\@
vmovdqu (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
jmp _data_read_\@
jmp .L_data_read_\@
_fewer_than_16_bytes_\@:
.L_fewer_than_16_bytes_\@:
lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
mov \PLAIN_CYPH_LEN, %r12
READ_PARTIAL_BLOCK %r10 %r12 %xmm1
mov PBlockLen(arg2), %r13
_data_read_\@: # Finished reading in data
.L_data_read_\@: # Finished reading in data
vmovdqu PBlockEncKey(arg2), %xmm9
vmovdqu HashKey(arg2), %xmm13
@@ -777,9 +755,9 @@ _data_read_\@: # Finished reading in data
sub $16, %r10
# Determine if the partial block is not being filled and
# shift the mask accordingly
jge _no_extra_mask_1_\@
jge .L_no_extra_mask_1_\@
sub %r10, %r12
_no_extra_mask_1_\@:
.L_no_extra_mask_1_\@:
vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1
# get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -792,17 +770,17 @@ _no_extra_mask_1_\@:
vpxor %xmm3, \AAD_HASH, \AAD_HASH
test %r10, %r10
jl _partial_incomplete_1_\@
jl .L_partial_incomplete_1_\@
# GHASH computation for the last <16 Byte block
\GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
xor %eax,%eax
mov %rax, PBlockLen(arg2)
jmp _dec_done_\@
_partial_incomplete_1_\@:
jmp .L_dec_done_\@
.L_partial_incomplete_1_\@:
add \PLAIN_CYPH_LEN, PBlockLen(arg2)
_dec_done_\@:
.L_dec_done_\@:
vmovdqu \AAD_HASH, AadHash(arg2)
.else
vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
@@ -813,9 +791,9 @@ _dec_done_\@:
sub $16, %r10
# Determine if the partial block is not being filled and
# shift the mask accordingly
jge _no_extra_mask_2_\@
jge .L_no_extra_mask_2_\@
sub %r10, %r12
_no_extra_mask_2_\@:
.L_no_extra_mask_2_\@:
vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1
# get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -827,17 +805,17 @@ _no_extra_mask_2_\@:
vpxor %xmm9, \AAD_HASH, \AAD_HASH
test %r10, %r10
jl _partial_incomplete_2_\@
jl .L_partial_incomplete_2_\@
# GHASH computation for the last <16 Byte block
\GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
xor %eax,%eax
mov %rax, PBlockLen(arg2)
jmp _encode_done_\@
_partial_incomplete_2_\@:
jmp .L_encode_done_\@
.L_partial_incomplete_2_\@:
add \PLAIN_CYPH_LEN, PBlockLen(arg2)
_encode_done_\@:
.L_encode_done_\@:
vmovdqu \AAD_HASH, AadHash(arg2)
vmovdqa SHUF_MASK(%rip), %xmm10
@@ -847,32 +825,32 @@ _encode_done_\@:
.endif
# output encrypted Bytes
test %r10, %r10
jl _partial_fill_\@
jl .L_partial_fill_\@
mov %r13, %r12
mov $16, %r13
# Set r13 to be the number of bytes to write out
sub %r12, %r13
jmp _count_set_\@
_partial_fill_\@:
jmp .L_count_set_\@
.L_partial_fill_\@:
mov \PLAIN_CYPH_LEN, %r13
_count_set_\@:
.L_count_set_\@:
vmovdqa %xmm9, %xmm0
vmovq %xmm0, %rax
cmp $8, %r13
jle _less_than_8_bytes_left_\@
jle .L_less_than_8_bytes_left_\@
mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $8, \DATA_OFFSET
psrldq $8, %xmm0
vmovq %xmm0, %rax
sub $8, %r13
_less_than_8_bytes_left_\@:
.L_less_than_8_bytes_left_\@:
movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $1, \DATA_OFFSET
shr $8, %rax
sub $1, %r13
jne _less_than_8_bytes_left_\@
_partial_block_done_\@:
jne .L_less_than_8_bytes_left_\@
.L_partial_block_done_\@:
.endm # PARTIAL_BLOCK
###############################################################################
@@ -1073,7 +1051,7 @@ _partial_block_done_\@:
vmovdqa \XMM8, \T3
cmp $128, %r13
jl _initial_blocks_done\@ # no need for precomputed constants
jl .L_initial_blocks_done\@ # no need for precomputed constants
###############################################################################
# HashKey_i_k holds XORed values of the low and high parts of the HashKey_i
@@ -1215,7 +1193,7 @@ _partial_block_done_\@:
###############################################################################
_initial_blocks_done\@:
.L_initial_blocks_done\@:
.endm
@@ -2023,7 +2001,7 @@ SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
vmovdqa \XMM8, \T3
cmp $128, %r13
jl _initial_blocks_done\@ # no need for precomputed constants
jl .L_initial_blocks_done\@ # no need for precomputed constants
###############################################################################
# HashKey_i_k holds XORed values of the low and high parts of the HashKey_i
@@ -2167,7 +2145,7 @@ SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
###############################################################################
_initial_blocks_done\@:
.L_initial_blocks_done\@:
.endm


@@ -80,7 +80,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
vmovdqu .Lshufb_16x16b, a0; \
vmovdqu .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -132,7 +132,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
vmovdqu .Lshufb_16x16b, a0; \
vmovdqu .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -300,11 +300,11 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
vmovdqa .Ltf_s2_bitmatrix, t0; \
vmovdqa .Ltf_inv_bitmatrix, t1; \
vmovdqa .Ltf_id_bitmatrix, t2; \
vmovdqa .Ltf_aff_bitmatrix, t3; \
vmovdqa .Ltf_x2_bitmatrix, t4; \
vmovdqa .Ltf_s2_bitmatrix(%rip), t0; \
vmovdqa .Ltf_inv_bitmatrix(%rip), t1; \
vmovdqa .Ltf_id_bitmatrix(%rip), t2; \
vmovdqa .Ltf_aff_bitmatrix(%rip), t3; \
vmovdqa .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
@@ -324,13 +324,13 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
vmovdqa .Linv_shift_row, t0; \
vmovdqa .Lshift_row, t1; \
vbroadcastss .L0f0f0f0f, t6; \
vmovdqa .Ltf_lo__inv_aff__and__s2, t2; \
vmovdqa .Ltf_hi__inv_aff__and__s2, t3; \
vmovdqa .Ltf_lo__x2__and__fwd_aff, t4; \
vmovdqa .Ltf_hi__x2__and__fwd_aff, t5; \
vmovdqa .Linv_shift_row(%rip), t0; \
vmovdqa .Lshift_row(%rip), t1; \
vbroadcastss .L0f0f0f0f(%rip), t6; \
vmovdqa .Ltf_lo__inv_aff__and__s2(%rip), t2; \
vmovdqa .Ltf_hi__inv_aff__and__s2(%rip), t3; \
vmovdqa .Ltf_lo__x2__and__fwd_aff(%rip), t4; \
vmovdqa .Ltf_hi__x2__and__fwd_aff(%rip), t5; \
\
vaesenclast t7, x0, x0; \
vaesenclast t7, x4, x4; \


@@ -96,7 +96,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
vbroadcasti128 .Lshufb_16x16b, a0; \
vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -148,7 +148,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
vbroadcasti128 .Lshufb_16x16b, a0; \
vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -307,11 +307,11 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
vpbroadcastq .Ltf_s2_bitmatrix, t0; \
vpbroadcastq .Ltf_inv_bitmatrix, t1; \
vpbroadcastq .Ltf_id_bitmatrix, t2; \
vpbroadcastq .Ltf_aff_bitmatrix, t3; \
vpbroadcastq .Ltf_x2_bitmatrix, t4; \
vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0; \
vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1; \
vpbroadcastq .Ltf_id_bitmatrix(%rip), t2; \
vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3; \
vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
@@ -332,12 +332,12 @@
t4, t5, t6, t7) \
vpxor t7, t7, t7; \
vpxor t6, t6, t6; \
vbroadcasti128 .Linv_shift_row, t0; \
vbroadcasti128 .Lshift_row, t1; \
vbroadcasti128 .Ltf_lo__inv_aff__and__s2, t2; \
vbroadcasti128 .Ltf_hi__inv_aff__and__s2, t3; \
vbroadcasti128 .Ltf_lo__x2__and__fwd_aff, t4; \
vbroadcasti128 .Ltf_hi__x2__and__fwd_aff, t5; \
vbroadcasti128 .Linv_shift_row(%rip), t0; \
vbroadcasti128 .Lshift_row(%rip), t1; \
vbroadcasti128 .Ltf_lo__inv_aff__and__s2(%rip), t2; \
vbroadcasti128 .Ltf_hi__inv_aff__and__s2(%rip), t3; \
vbroadcasti128 .Ltf_lo__x2__and__fwd_aff(%rip), t4; \
vbroadcasti128 .Ltf_hi__x2__and__fwd_aff(%rip), t5; \
\
vextracti128 $1, x0, t6##_x; \
vaesenclast t7##_x, x0##_x, x0##_x; \
@@ -369,7 +369,7 @@
vaesdeclast t7##_x, t6##_x, t6##_x; \
vinserti128 $1, t6##_x, x6, x6; \
\
vpbroadcastd .L0f0f0f0f, t6; \
vpbroadcastd .L0f0f0f0f(%rip), t6; \
\
/* AES inverse shift rows */ \
vpshufb t0, x0, x0; \


@@ -80,7 +80,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
vbroadcasti64x2 .Lshufb_16x16b, a0; \
vbroadcasti64x2 .Lshufb_16x16b(%rip), a0; \
vmovdqu64 st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -132,7 +132,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
vbroadcasti64x2 .Lshufb_16x16b, a0; \
vbroadcasti64x2 .Lshufb_16x16b(%rip), a0; \
vmovdqu64 st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -308,11 +308,11 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
vpbroadcastq .Ltf_s2_bitmatrix, t0; \
vpbroadcastq .Ltf_inv_bitmatrix, t1; \
vpbroadcastq .Ltf_id_bitmatrix, t2; \
vpbroadcastq .Ltf_aff_bitmatrix, t3; \
vpbroadcastq .Ltf_x2_bitmatrix, t4; \
vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0; \
vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1; \
vpbroadcastq .Ltf_id_bitmatrix(%rip), t2; \
vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3; \
vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
@@ -332,11 +332,11 @@
y4, y5, y6, y7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
vpbroadcastq .Ltf_s2_bitmatrix, t0; \
vpbroadcastq .Ltf_inv_bitmatrix, t1; \
vpbroadcastq .Ltf_id_bitmatrix, t2; \
vpbroadcastq .Ltf_aff_bitmatrix, t3; \
vpbroadcastq .Ltf_x2_bitmatrix, t4; \
vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0; \
vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1; \
vpbroadcastq .Ltf_id_bitmatrix(%rip), t2; \
vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3; \
vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \


@@ -52,10 +52,10 @@
/* \
* S-function with AES subbytes \
*/ \
vmovdqa .Linv_shift_row, t4; \
vbroadcastss .L0f0f0f0f, t7; \
vmovdqa .Lpre_tf_lo_s1, t0; \
vmovdqa .Lpre_tf_hi_s1, t1; \
vmovdqa .Linv_shift_row(%rip), t4; \
vbroadcastss .L0f0f0f0f(%rip), t7; \
vmovdqa .Lpre_tf_lo_s1(%rip), t0; \
vmovdqa .Lpre_tf_hi_s1(%rip), t1; \
\
/* AES inverse shift rows */ \
vpshufb t4, x0, x0; \
@@ -68,8 +68,8 @@
vpshufb t4, x6, x6; \
\
/* prefilter sboxes 1, 2 and 3 */ \
vmovdqa .Lpre_tf_lo_s4, t2; \
vmovdqa .Lpre_tf_hi_s4, t3; \
vmovdqa .Lpre_tf_lo_s4(%rip), t2; \
vmovdqa .Lpre_tf_hi_s4(%rip), t3; \
filter_8bit(x0, t0, t1, t7, t6); \
filter_8bit(x7, t0, t1, t7, t6); \
filter_8bit(x1, t0, t1, t7, t6); \
@@ -83,8 +83,8 @@
filter_8bit(x6, t2, t3, t7, t6); \
\
/* AES subbytes + AES shift rows */ \
vmovdqa .Lpost_tf_lo_s1, t0; \
vmovdqa .Lpost_tf_hi_s1, t1; \
vmovdqa .Lpost_tf_lo_s1(%rip), t0; \
vmovdqa .Lpost_tf_hi_s1(%rip), t1; \
vaesenclast t4, x0, x0; \
vaesenclast t4, x7, x7; \
vaesenclast t4, x1, x1; \
@@ -95,16 +95,16 @@
vaesenclast t4, x6, x6; \
\
/* postfilter sboxes 1 and 4 */ \
vmovdqa .Lpost_tf_lo_s3, t2; \
vmovdqa .Lpost_tf_hi_s3, t3; \
vmovdqa .Lpost_tf_lo_s3(%rip), t2; \
vmovdqa .Lpost_tf_hi_s3(%rip), t3; \
filter_8bit(x0, t0, t1, t7, t6); \
filter_8bit(x7, t0, t1, t7, t6); \
filter_8bit(x3, t0, t1, t7, t6); \
filter_8bit(x6, t0, t1, t7, t6); \
\
/* postfilter sbox 3 */ \
vmovdqa .Lpost_tf_lo_s2, t4; \
vmovdqa .Lpost_tf_hi_s2, t5; \
vmovdqa .Lpost_tf_lo_s2(%rip), t4; \
vmovdqa .Lpost_tf_hi_s2(%rip), t5; \
filter_8bit(x2, t2, t3, t7, t6); \
filter_8bit(x5, t2, t3, t7, t6); \
\
@@ -443,7 +443,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
vmovdqu .Lshufb_16x16b, a0; \
vmovdqu .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -482,7 +482,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
#define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
y6, y7, rio, key) \
vmovq key, x0; \
vpshufb .Lpack_bswap, x0, x0; \
vpshufb .Lpack_bswap(%rip), x0, x0; \
\
vpxor 0 * 16(rio), x0, y7; \
vpxor 1 * 16(rio), x0, y6; \
@@ -533,7 +533,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
vmovdqu x0, stack_tmp0; \
\
vmovq key, x0; \
vpshufb .Lpack_bswap, x0, x0; \
vpshufb .Lpack_bswap(%rip), x0, x0; \
\
vpxor x0, y7, y7; \
vpxor x0, y6, y6; \


@@ -64,12 +64,12 @@
/* \
* S-function with AES subbytes \
*/ \
vbroadcasti128 .Linv_shift_row, t4; \
vpbroadcastd .L0f0f0f0f, t7; \
vbroadcasti128 .Lpre_tf_lo_s1, t5; \
vbroadcasti128 .Lpre_tf_hi_s1, t6; \
vbroadcasti128 .Lpre_tf_lo_s4, t2; \
vbroadcasti128 .Lpre_tf_hi_s4, t3; \
vbroadcasti128 .Linv_shift_row(%rip), t4; \
vpbroadcastd .L0f0f0f0f(%rip), t7; \
vbroadcasti128 .Lpre_tf_lo_s1(%rip), t5; \
vbroadcasti128 .Lpre_tf_hi_s1(%rip), t6; \
vbroadcasti128 .Lpre_tf_lo_s4(%rip), t2; \
vbroadcasti128 .Lpre_tf_hi_s4(%rip), t3; \
\
/* AES inverse shift rows */ \
vpshufb t4, x0, x0; \
@@ -115,8 +115,8 @@
vinserti128 $1, t2##_x, x6, x6; \
vextracti128 $1, x1, t3##_x; \
vextracti128 $1, x4, t2##_x; \
vbroadcasti128 .Lpost_tf_lo_s1, t0; \
vbroadcasti128 .Lpost_tf_hi_s1, t1; \
vbroadcasti128 .Lpost_tf_lo_s1(%rip), t0; \
vbroadcasti128 .Lpost_tf_hi_s1(%rip), t1; \
vaesenclast t4##_x, x2##_x, x2##_x; \
vaesenclast t4##_x, t6##_x, t6##_x; \
vinserti128 $1, t6##_x, x2, x2; \
@@ -131,16 +131,16 @@
vinserti128 $1, t2##_x, x4, x4; \
\
/* postfilter sboxes 1 and 4 */ \
vbroadcasti128 .Lpost_tf_lo_s3, t2; \
vbroadcasti128 .Lpost_tf_hi_s3, t3; \
vbroadcasti128 .Lpost_tf_lo_s3(%rip), t2; \
vbroadcasti128 .Lpost_tf_hi_s3(%rip), t3; \
filter_8bit(x0, t0, t1, t7, t6); \
filter_8bit(x7, t0, t1, t7, t6); \
filter_8bit(x3, t0, t1, t7, t6); \
filter_8bit(x6, t0, t1, t7, t6); \
\
/* postfilter sbox 3 */ \
vbroadcasti128 .Lpost_tf_lo_s2, t4; \
vbroadcasti128 .Lpost_tf_hi_s2, t5; \
vbroadcasti128 .Lpost_tf_lo_s2(%rip), t4; \
vbroadcasti128 .Lpost_tf_hi_s2(%rip), t5; \
filter_8bit(x2, t2, t3, t7, t6); \
filter_8bit(x5, t2, t3, t7, t6); \
\
@@ -475,7 +475,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
vbroadcasti128 .Lshufb_16x16b, a0; \
vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -514,7 +514,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
#define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
y6, y7, rio, key) \
vpbroadcastq key, x0; \
vpshufb .Lpack_bswap, x0, x0; \
vpshufb .Lpack_bswap(%rip), x0, x0; \
\
vpxor 0 * 32(rio), x0, y7; \
vpxor 1 * 32(rio), x0, y6; \
@@ -565,7 +565,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
vmovdqu x0, stack_tmp0; \
\
vpbroadcastq key, x0; \
vpshufb .Lpack_bswap, x0, x0; \
vpshufb .Lpack_bswap(%rip), x0, x0; \
\
vpxor x0, y7, y7; \
vpxor x0, y6, y6; \


@@ -77,11 +77,13 @@
#define RXORbl %r9b
#define xor2ror16(T0, T1, tmp1, tmp2, ab, dst) \
leaq T0(%rip), tmp1; \
movzbl ab ## bl, tmp2 ## d; \
xorq (tmp1, tmp2, 8), dst; \
leaq T1(%rip), tmp2; \
movzbl ab ## bh, tmp1 ## d; \
rorq $16, ab; \
xorq T0(, tmp2, 8), dst; \
xorq T1(, tmp1, 8), dst;
xorq (tmp2, tmp1, 8), dst;
/**********************************************************************
1-way camellia


@@ -84,15 +84,19 @@
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
movzbl src ## bh, RID1d; \
leaq s1(%rip), RID2; \
movl (RID2,RID1,4), dst ## d; \
movzbl src ## bl, RID2d; \
leaq s2(%rip), RID1; \
op1 (RID1,RID2,4), dst ## d; \
shrq $16, src; \
movl s1(, RID1, 4), dst ## d; \
op1 s2(, RID2, 4), dst ## d; \
movzbl src ## bh, RID1d; \
leaq s3(%rip), RID2; \
op2 (RID2,RID1,4), dst ## d; \
movzbl src ## bl, RID2d; \
interleave_op(il_reg); \
op2 s3(, RID1, 4), dst ## d; \
op3 s4(, RID2, 4), dst ## d;
leaq s4(%rip), RID1; \
op3 (RID1,RID2,4), dst ## d;
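/* The leaq dance above exists because RIP-relative addressing cannot be
 * combined with an index register on x86-64: each s1..s4 lookup now takes
 * the table base through a register to stay position-independent. */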
#define dummy(d) /* do nothing */
@@ -151,15 +155,15 @@
subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
#define enc_preload_rkr() \
vbroadcastss .L16_mask, RKR; \
vbroadcastss .L16_mask(%rip), RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor kr(CTX), RKR, RKR;
#define dec_preload_rkr() \
vbroadcastss .L16_mask, RKR; \
vbroadcastss .L16_mask(%rip), RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor kr(CTX), RKR, RKR; \
vpshufb .Lbswap128_mask, RKR, RKR;
vpshufb .Lbswap128_mask(%rip), RKR, RKR;
#define transpose_2x4(x0, x1, t0, t1) \
vpunpckldq x1, x0, t0; \
@@ -235,9 +239,9 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
movq %rdi, CTX;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
vmovdqa .Lbswap_mask(%rip), RKM;
vmovd .Lfirst_mask(%rip), R1ST;
vmovd .L32_mask(%rip), R32;
enc_preload_rkr();
inpack_blocks(RL1, RR1, RTMP, RX, RKM);
@@ -271,7 +275,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
popq %rbx;
popq %r15;
vmovdqa .Lbswap_mask, RKM;
vmovdqa .Lbswap_mask(%rip), RKM;
outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
@@ -308,9 +312,9 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
movq %rdi, CTX;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
vmovdqa .Lbswap_mask(%rip), RKM;
vmovd .Lfirst_mask(%rip), R1ST;
vmovd .L32_mask(%rip), R32;
dec_preload_rkr();
inpack_blocks(RL1, RR1, RTMP, RX, RKM);
@@ -341,7 +345,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
round(RL, RR, 1, 2);
round(RR, RL, 0, 1);
vmovdqa .Lbswap_mask, RKM;
vmovdqa .Lbswap_mask(%rip), RKM;
popq %rbx;
popq %r15;
@@ -504,8 +508,8 @@ SYM_FUNC_START(cast5_ctr_16way)
vpcmpeqd RKR, RKR, RKR;
vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
vmovdqa .Lbswap_iv_mask, R1ST;
vmovdqa .Lbswap128_mask, RKM;
vmovdqa .Lbswap_iv_mask(%rip), R1ST;
vmovdqa .Lbswap128_mask(%rip), RKM;
/* load IV and byteswap */
vmovq (%rcx), RX;


@@ -84,15 +84,19 @@
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
movzbl src ## bh, RID1d; \
leaq s1(%rip), RID2; \
movl (RID2,RID1,4), dst ## d; \
movzbl src ## bl, RID2d; \
leaq s2(%rip), RID1; \
op1 (RID1,RID2,4), dst ## d; \
shrq $16, src; \
movl s1(, RID1, 4), dst ## d; \
op1 s2(, RID2, 4), dst ## d; \
movzbl src ## bh, RID1d; \
leaq s3(%rip), RID2; \
op2 (RID2,RID1,4), dst ## d; \
movzbl src ## bl, RID2d; \
interleave_op(il_reg); \
op2 s3(, RID1, 4), dst ## d; \
op3 s4(, RID2, 4), dst ## d;
leaq s4(%rip), RID1; \
op3 (RID1,RID2,4), dst ## d;
#define dummy(d) /* do nothing */
@@ -175,10 +179,10 @@
qop(RD, RC, 1);
#define shuffle(mask) \
vpshufb mask, RKR, RKR;
vpshufb mask(%rip), RKR, RKR;
#define preload_rkr(n, do_mask, mask) \
vbroadcastss .L16_mask, RKR; \
vbroadcastss .L16_mask(%rip), RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor (kr+n*16)(CTX), RKR, RKR; \
do_mask(mask);
@@ -258,9 +262,9 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
movq %rdi, CTX;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
vmovdqa .Lbswap_mask(%rip), RKM;
vmovd .Lfirst_mask(%rip), R1ST;
vmovd .L32_mask(%rip), R32;
inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -284,7 +288,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
popq %rbx;
popq %r15;
vmovdqa .Lbswap_mask, RKM;
vmovdqa .Lbswap_mask(%rip), RKM;
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -306,9 +310,9 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
movq %rdi, CTX;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
vmovdqa .Lbswap_mask(%rip), RKM;
vmovd .Lfirst_mask(%rip), R1ST;
vmovd .L32_mask(%rip), R32;
inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -332,7 +336,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
popq %rbx;
popq %r15;
vmovdqa .Lbswap_mask, RKM;
vmovdqa .Lbswap_mask(%rip), RKM;
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);


@@ -90,7 +90,7 @@ SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligne
sub $0x40, LEN
add $0x40, BUF
cmp $0x40, LEN
jb less_64
jb .Lless_64
#ifdef __x86_64__
movdqa .Lconstant_R2R1(%rip), CONSTANT
@@ -98,7 +98,7 @@
movdqa .Lconstant_R2R1, CONSTANT
#endif
loop_64:/* 64 bytes Full cache line folding */
.Lloop_64:/* 64 bytes Full cache line folding */
prefetchnta 0x40(BUF)
movdqa %xmm1, %xmm5
movdqa %xmm2, %xmm6
@@ -139,8 +139,8 @@ loop_64:/* 64 bytes Full cache line folding */
sub $0x40, LEN
add $0x40, BUF
cmp $0x40, LEN
jge loop_64
less_64:/* Folding cache line into 128bit */
jge .Lloop_64
.Lless_64:/* Folding cache line into 128bit */
#ifdef __x86_64__
movdqa .Lconstant_R4R3(%rip), CONSTANT
#else
@@ -167,8 +167,8 @@ less_64:/* Folding cache line into 128bit */
pxor %xmm4, %xmm1
cmp $0x10, LEN
jb fold_64
loop_16:/* Folding rest buffer into 128bit */
jb .Lfold_64
.Lloop_16:/* Folding rest buffer into 128bit */
movdqa %xmm1, %xmm5
pclmulqdq $0x00, CONSTANT, %xmm1
pclmulqdq $0x11, CONSTANT, %xmm5
@@ -177,9 +177,9 @@ loop_16:/* Folding rest buffer into 128bit */
sub $0x10, LEN
add $0x10, BUF
cmp $0x10, LEN
jge loop_16
jge .Lloop_16
fold_64:
.Lfold_64:
/* perform the last 64 bit fold, also adds 32 zeroes
* to the input stream */
pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
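
For reference, the function this PCLMULQDQ folding accelerates is the bit-reflected CRC-32 below; the folds keep 64 bytes in flight, but the result must match this byte-serial form (seed and final inversion are caller conventions in the kernel):

#include <stddef.h>
#include <stdint.h>

static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int bit = 0; bit < 8; bit++)   /* reflected poly 0xEDB88320 */
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}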


@@ -49,15 +49,15 @@
## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
.macro LABEL prefix n
\prefix\n\():
.L\prefix\n\():
.endm
.macro JMPTBL_ENTRY i
.quad crc_\i
.quad .Lcrc_\i
.endm
.macro JNC_LESS_THAN j
jnc less_than_\j
jnc .Lless_than_\j
.endm
# Define threshold where buffers are considered "small" and routed to more
@@ -108,30 +108,30 @@ SYM_FUNC_START(crc_pcl)
neg %bufp
and $7, %bufp # calculate the unalignment amount of
# the address
je proc_block # Skip if aligned
je .Lproc_block # Skip if aligned
## If len is less than 8 and we're unaligned, we need to jump
## to special code to avoid reading beyond the end of the buffer
cmp $8, len
jae do_align
jae .Ldo_align
# less_than_8 expects length in upper 3 bits of len_dw
# less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
shl $32-3+1, len_dw
jmp less_than_8_post_shl1
jmp .Lless_than_8_post_shl1
do_align:
.Ldo_align:
#### Calculate CRC of unaligned bytes of the buffer (if any)
movq (bufptmp), tmp # load a quadword from the buffer
add %bufp, bufptmp # align buffer pointer for quadword
# processing
sub %bufp, len # update buffer length
align_loop:
.Lalign_loop:
crc32b %bl, crc_init_dw # compute crc32 of 1-byte
shr $8, tmp # get next byte
dec %bufp
jne align_loop
jne .Lalign_loop
proc_block:
.Lproc_block:
################################################################
## 2) PROCESS BLOCKS:
@@ -141,11 +141,11 @@ proc_block:
movq len, tmp # save num bytes in tmp
cmpq $128*24, len
jae full_block
jae .Lfull_block
continue_block:
.Lcontinue_block:
cmpq $SMALL_SIZE, len
jb small
jb .Lsmall
## len < 128*24
movq $2731, %rax # 2731 = ceil(2^16 / 24)
@@ -168,13 +168,14 @@ continue_block:
xor crc2, crc2
## branch into array
mov jump_table(,%rax,8), %bufp
leaq jump_table(%rip), %bufp
mov (%bufp,%rax,8), %bufp
JMP_NOSPEC bufp
################################################################
## 2a) PROCESS FULL BLOCKS:
################################################################
full_block:
.Lfull_block:
movl $128,%eax
lea 128*8*2(block_0), block_1
lea 128*8*3(block_0), block_2
@@ -189,7 +190,6 @@ full_block:
## 3) CRC Array:
################################################################
crc_array:
i=128
.rept 128-1
.altmacro
@@ -242,28 +242,28 @@ LABEL crc_ 0
ENDBR
mov tmp, len
cmp $128*24, tmp
jae full_block
jae .Lfull_block
cmp $24, tmp
jae continue_block
jae .Lcontinue_block
less_than_24:
.Lless_than_24:
shl $32-4, len_dw # less_than_16 expects length
# in upper 4 bits of len_dw
jnc less_than_16
jnc .Lless_than_16
crc32q (bufptmp), crc_init
crc32q 8(bufptmp), crc_init
jz do_return
jz .Ldo_return
add $16, bufptmp
# len is less than 8 if we got here
# less_than_8 expects length in upper 3 bits of len_dw
# less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
shl $2, len_dw
jmp less_than_8_post_shl1
jmp .Lless_than_8_post_shl1
#######################################################################
## 6) LESS THAN 256-bytes REMAIN AT THIS POINT (8-bits of len are full)
#######################################################################
small:
.Lsmall:
shl $32-8, len_dw # Prepare len_dw for less_than_256
j=256
.rept 5 # j = {256, 128, 64, 32, 16}
@@ -279,32 +279,32 @@ LABEL less_than_ %j # less_than_j: Length should be in
crc32q i(bufptmp), crc_init # Compute crc32 of 8-byte data
i=i+8
.endr
jz do_return # Return if remaining length is zero
jz .Ldo_return # Return if remaining length is zero
add $j, bufptmp # Advance buf
.endr
less_than_8: # Length should be stored in
.Lless_than_8: # Length should be stored in
# upper 3 bits of len_dw
shl $1, len_dw
less_than_8_post_shl1:
jnc less_than_4
.Lless_than_8_post_shl1:
jnc .Lless_than_4
crc32l (bufptmp), crc_init_dw # CRC of 4 bytes
jz do_return # return if remaining data is zero
jz .Ldo_return # return if remaining data is zero
add $4, bufptmp
less_than_4: # Length should be stored in
.Lless_than_4: # Length should be stored in
# upper 2 bits of len_dw
shl $1, len_dw
jnc less_than_2
jnc .Lless_than_2
crc32w (bufptmp), crc_init_dw # CRC of 2 bytes
jz do_return # return if remaining data is zero
jz .Ldo_return # return if remaining data is zero
add $2, bufptmp
less_than_2: # Length should be stored in the MSB
.Lless_than_2: # Length should be stored in the MSB
# of len_dw
shl $1, len_dw
jnc less_than_1
jnc .Lless_than_1
crc32b (bufptmp), crc_init_dw # CRC of 1 byte
less_than_1: # Length should be zero
do_return:
.Lless_than_1: # Length should be zero
.Ldo_return:
movq crc_init, %rax
popq %rsi
popq %rdi
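
One detail in crc_pcl worth a note: the "movq $2731, %rax # 2731 = ceil(2^16 / 24)" above divides the length by 24 (the bytes consumed per triple 8-byte stride) with a multiply and shift instead of a div. A quick sketch checking the constant over the relevant range:

#include <assert.h>
#include <stdint.h>

/* len / 24 via the reciprocal 2731 = ceil(2^16 / 24); exact for
 * len < 8192, which covers the len < 128*24 case used above. */
static uint64_t div24(uint64_t len)
{
	return (len * 2731) >> 16;
}

int main(void)
{
	for (uint64_t len = 0; len < 128 * 24; len++)
		assert(div24(len) == len / 24);
	return 0;
}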


@@ -129,21 +129,29 @@
movzbl RW0bl, RT2d; \
movzbl RW0bh, RT3d; \
shrq $16, RW0; \
movq s8(, RT0, 8), RT0; \
xorq s6(, RT1, 8), to; \
leaq s8(%rip), RW1; \
movq (RW1, RT0, 8), RT0; \
leaq s6(%rip), RW1; \
xorq (RW1, RT1, 8), to; \
movzbl RW0bl, RL1d; \
movzbl RW0bh, RT1d; \
shrl $16, RW0d; \
xorq s4(, RT2, 8), RT0; \
xorq s2(, RT3, 8), to; \
leaq s4(%rip), RW1; \
xorq (RW1, RT2, 8), RT0; \
leaq s2(%rip), RW1; \
xorq (RW1, RT3, 8), to; \
movzbl RW0bl, RT2d; \
movzbl RW0bh, RT3d; \
xorq s7(, RL1, 8), RT0; \
xorq s5(, RT1, 8), to; \
xorq s3(, RT2, 8), RT0; \
leaq s7(%rip), RW1; \
xorq (RW1, RL1, 8), RT0; \
leaq s5(%rip), RW1; \
xorq (RW1, RT1, 8), to; \
leaq s3(%rip), RW1; \
xorq (RW1, RT2, 8), RT0; \
load_next_key(n, RW0); \
xorq RT0, to; \
xorq s1(, RT3, 8), to; \
leaq s1(%rip), RW1; \
xorq (RW1, RT3, 8), to; \
#define load_next_key(n, RWx) \
movq (((n) + 1) * 8)(CTX), RWx;
@@ -355,65 +363,89 @@ SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrq $16, RW0; \
xorq s8(, RT3, 8), to##0; \
xorq s6(, RT1, 8), to##0; \
leaq s8(%rip), RT2; \
xorq (RT2, RT3, 8), to##0; \
leaq s6(%rip), RT2; \
xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrq $16, RW0; \
xorq s4(, RT3, 8), to##0; \
xorq s2(, RT1, 8), to##0; \
leaq s4(%rip), RT2; \
xorq (RT2, RT3, 8), to##0; \
leaq s2(%rip), RT2; \
xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
shrl $16, RW0d; \
xorq s7(, RT3, 8), to##0; \
xorq s5(, RT1, 8), to##0; \
leaq s7(%rip), RT2; \
xorq (RT2, RT3, 8), to##0; \
leaq s5(%rip), RT2; \
xorq (RT2, RT1, 8), to##0; \
movzbl RW0bl, RT3d; \
movzbl RW0bh, RT1d; \
load_next_key(n, RW0); \
xorq s3(, RT3, 8), to##0; \
xorq s1(, RT1, 8), to##0; \
leaq s3(%rip), RT2; \
xorq (RT2, RT3, 8), to##0; \
leaq s1(%rip), RT2; \
xorq (RT2, RT1, 8), to##0; \
xorq from##1, RW1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrq $16, RW1; \
xorq s8(, RT3, 8), to##1; \
xorq s6(, RT1, 8), to##1; \
leaq s8(%rip), RT2; \
xorq (RT2, RT3, 8), to##1; \
leaq s6(%rip), RT2; \
xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrq $16, RW1; \
xorq s4(, RT3, 8), to##1; \
xorq s2(, RT1, 8), to##1; \
leaq s4(%rip), RT2; \
xorq (RT2, RT3, 8), to##1; \
leaq s2(%rip), RT2; \
xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
shrl $16, RW1d; \
xorq s7(, RT3, 8), to##1; \
xorq s5(, RT1, 8), to##1; \
leaq s7(%rip), RT2; \
xorq (RT2, RT3, 8), to##1; \
leaq s5(%rip), RT2; \
xorq (RT2, RT1, 8), to##1; \
movzbl RW1bl, RT3d; \
movzbl RW1bh, RT1d; \
do_movq(RW0, RW1); \
xorq s3(, RT3, 8), to##1; \
xorq s1(, RT1, 8), to##1; \
leaq s3(%rip), RT2; \
xorq (RT2, RT3, 8), to##1; \
leaq s1(%rip), RT2; \
xorq (RT2, RT1, 8), to##1; \
xorq from##2, RW2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrq $16, RW2; \
xorq s8(, RT3, 8), to##2; \
xorq s6(, RT1, 8), to##2; \
leaq s8(%rip), RT2; \
xorq (RT2, RT3, 8), to##2; \
leaq s6(%rip), RT2; \
xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrq $16, RW2; \
xorq s4(, RT3, 8), to##2; \
xorq s2(, RT1, 8), to##2; \
leaq s4(%rip), RT2; \
xorq (RT2, RT3, 8), to##2; \
leaq s2(%rip), RT2; \
xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
shrl $16, RW2d; \
xorq s7(, RT3, 8), to##2; \
xorq s5(, RT1, 8), to##2; \
leaq s7(%rip), RT2; \
xorq (RT2, RT3, 8), to##2; \
leaq s5(%rip), RT2; \
xorq (RT2, RT1, 8), to##2; \
movzbl RW2bl, RT3d; \
movzbl RW2bh, RT1d; \
do_movq(RW0, RW2); \
xorq s3(, RT3, 8), to##2; \
xorq s1(, RT1, 8), to##2;
leaq s3(%rip), RT2; \
xorq (RT2, RT3, 8), to##2; \
leaq s1(%rip), RT2; \
xorq (RT2, RT1, 8), to##2;
#define __movq(src, dst) \
movq src, dst;


@@ -93,7 +93,7 @@ SYM_FUNC_START(clmul_ghash_mul)
FRAME_BEGIN
movups (%rdi), DATA
movups (%rsi), SHASH
movaps .Lbswap_mask, BSWAP
movaps .Lbswap_mask(%rip), BSWAP
pshufb BSWAP, DATA
call __clmul_gf128mul_ble
pshufb BSWAP, DATA
@@ -110,7 +110,7 @@ SYM_FUNC_START(clmul_ghash_update)
FRAME_BEGIN
cmp $16, %rdx
jb .Lupdate_just_ret # check length
movaps .Lbswap_mask, BSWAP
movaps .Lbswap_mask(%rip), BSWAP
movups (%rdi), DATA
movups (%rcx), SHASH
pshufb BSWAP, DATA


@@ -485,18 +485,18 @@
xchg WK_BUF, PRECALC_BUF
.align 32
_loop:
.L_loop:
/*
* code loops through more than one block
* we use the K_BASE value as a signal of the last block,
* it is set below by: cmovae BUFFER_PTR, K_BASE
*/
test BLOCKS_CTR, BLOCKS_CTR
jnz _begin
jnz .L_begin
.align 32
jmp _end
jmp .L_end
.align 32
_begin:
.L_begin:
/*
* Do first block
@@ -508,9 +508,6 @@ _begin:
.set j, j+2
.endr
jmp _loop0
_loop0:
/*
* rounds:
* 10,12,14,16,18
@@ -545,7 +542,7 @@ _loop0:
UPDATE_HASH 16(HASH_PTR), E
test BLOCKS_CTR, BLOCKS_CTR
jz _loop
jz .L_loop
mov TB, B
@@ -562,8 +559,6 @@ _loop0:
.set j, j+2
.endr
jmp _loop1
_loop1:
/*
* rounds
* 20+80,22+80,24+80,26+80,28+80
@@ -574,9 +569,6 @@ _loop1:
.set j, j+2
.endr
jmp _loop2
_loop2:
/*
* rounds
* 40+80,42+80,44+80,46+80,48+80
@@ -592,9 +584,6 @@ _loop2:
/* Move to the next block only if needed*/
ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
jmp _loop3
_loop3:
/*
* rounds
* 60+80,62+80,64+80,66+80,68+80
@@ -623,10 +612,10 @@ _loop3:
xchg WK_BUF, PRECALC_BUF
jmp _loop
jmp .L_loop
.align 32
_end:
.L_end:
.endm
/*


@@ -360,7 +360,7 @@ SYM_TYPED_FUNC_START(sha256_transform_avx)
and $~15, %rsp # align stack pointer
shl $6, NUM_BLKS # convert to bytes
jz done_hash
jz .Ldone_hash
add INP, NUM_BLKS # pointer to end of data
mov NUM_BLKS, _INP_END(%rsp)
@@ -377,7 +377,7 @@ SYM_TYPED_FUNC_START(sha256_transform_avx)
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
loop0:
.Lloop0:
lea K256(%rip), TBL
## byte swap first 16 dwords
@@ -391,7 +391,7 @@ loop0:
## schedule 48 input dwords, by doing 3 rounds of 16 each
mov $3, SRND
.align 16
loop1:
.Lloop1:
vpaddd (TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED
@@ -410,10 +410,10 @@ loop1:
FOUR_ROUNDS_AND_SCHED
sub $1, SRND
jne loop1
jne .Lloop1
mov $2, SRND
loop2:
.Lloop2:
vpaddd (TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
DO_ROUND 0
@@ -433,7 +433,7 @@ loop2:
vmovdqa X3, X1
sub $1, SRND
jne loop2
jne .Lloop2
addm (4*0)(CTX),a
addm (4*1)(CTX),b
@@ -447,9 +447,9 @@ loop2:
mov _INP(%rsp), INP
add $64, INP
cmp _INP_END(%rsp), INP
jne loop0
jne .Lloop0
done_hash:
.Ldone_hash:
mov %rbp, %rsp
popq %rbp
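
The "3 rounds of 16" scheduling these loops drive corresponds to the scalar SHA-256 message schedule; each FOUR_ROUNDS_AND_SCHED interleaves four of the recurrences below with four compression rounds. A plain-C reference of the recurrence per FIPS 180-4 (not the kernel's vectorized layout):

#include <stdint.h>

static uint32_t ror32(uint32_t x, int n)
{
	return (x >> n) | (x << (32 - n));
}

/* Expand the 16 loaded message words to the full 64-entry schedule. */
static void sha256_schedule(uint32_t w[64])
{
	for (int t = 16; t < 64; t++) {
		uint32_t s0 = ror32(w[t - 15], 7) ^ ror32(w[t - 15], 18) ^ (w[t - 15] >> 3);
		uint32_t s1 = ror32(w[t - 2], 17) ^ ror32(w[t - 2], 19) ^ (w[t - 2] >> 10);
		w[t] = w[t - 16] + s0 + w[t - 7] + s1;
	}
}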


@@ -538,12 +538,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
and $-32, %rsp # align rsp to 32 byte boundary
shl $6, NUM_BLKS # convert to bytes
jz done_hash
jz .Ldone_hash
lea -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block
mov NUM_BLKS, _INP_END(%rsp)
cmp NUM_BLKS, INP
je only_one_block
je .Lonly_one_block
## load initial digest
mov (CTX), a
@@ -561,7 +561,7 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
mov CTX, _CTX(%rsp)
loop0:
.Lloop0:
## Load first 16 dwords from two blocks
VMOVDQ 0*32(INP),XTMP0
VMOVDQ 1*32(INP),XTMP1
@@ -580,7 +580,7 @@ loop0:
vperm2i128 $0x20, XTMP3, XTMP1, X2
vperm2i128 $0x31, XTMP3, XTMP1, X3
last_block_enter:
.Llast_block_enter:
add $64, INP
mov INP, _INP(%rsp)
@@ -588,34 +588,40 @@ last_block_enter:
xor SRND, SRND
.align 16
loop1:
vpaddd K256+0*32(SRND), X0, XFER
.Lloop1:
leaq K256+0*32(%rip), INP ## reuse INP as scratch reg
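## (x86-64 has no RIP-relative-plus-index addressing mode, so the K256
## base must be materialized in a register before the indexed vpaddd)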
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 0*32
vpaddd K256+1*32(SRND), X0, XFER
leaq K256+1*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 1*32
vpaddd K256+2*32(SRND), X0, XFER
leaq K256+2*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 2*32
vpaddd K256+3*32(SRND), X0, XFER
leaq K256+3*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 3*32
add $4*32, SRND
cmp $3*4*32, SRND
jb loop1
jb .Lloop1
loop2:
.Lloop2:
## Do last 16 rounds with no scheduling
vpaddd K256+0*32(SRND), X0, XFER
leaq K256+0*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
DO_4ROUNDS _XFER + 0*32
vpaddd K256+1*32(SRND), X1, XFER
leaq K256+1*32(%rip), INP
vpaddd (INP, SRND), X1, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
DO_4ROUNDS _XFER + 1*32
add $2*32, SRND
@@ -624,7 +630,7 @@ loop2:
vmovdqa X3, X1
cmp $4*4*32, SRND
jb loop2
jb .Lloop2
mov _CTX(%rsp), CTX
mov _INP(%rsp), INP
@@ -639,17 +645,17 @@ loop2:
addm (4*7)(CTX),h
cmp _INP_END(%rsp), INP
ja done_hash
ja .Ldone_hash
#### Do second block using previously scheduled results
xor SRND, SRND
.align 16
loop3:
.Lloop3:
DO_4ROUNDS _XFER + 0*32 + 16
DO_4ROUNDS _XFER + 1*32 + 16
add $2*32, SRND
cmp $4*4*32, SRND
jb loop3
jb .Lloop3
mov _CTX(%rsp), CTX
mov _INP(%rsp), INP
@@ -665,10 +671,10 @@ loop3:
addm (4*7)(CTX),h
cmp _INP_END(%rsp), INP
jb loop0
ja done_hash
jb .Lloop0
ja .Ldone_hash
do_last_block:
.Ldo_last_block:
VMOVDQ 0*16(INP),XWORD0
VMOVDQ 1*16(INP),XWORD1
VMOVDQ 2*16(INP),XWORD2
@@ -679,9 +685,9 @@ do_last_block:
vpshufb X_BYTE_FLIP_MASK, XWORD2, XWORD2
vpshufb X_BYTE_FLIP_MASK, XWORD3, XWORD3
jmp last_block_enter
jmp .Llast_block_enter
only_one_block:
.Lonly_one_block:
## load initial digest
mov (4*0)(CTX),a
@ -698,9 +704,9 @@ only_one_block:
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
mov CTX, _CTX(%rsp)
jmp do_last_block
jmp .Ldo_last_block
done_hash:
.Ldone_hash:
mov %rbp, %rsp
pop %rbp


@@ -369,7 +369,7 @@ SYM_TYPED_FUNC_START(sha256_transform_ssse3)
and $~15, %rsp
shl $6, NUM_BLKS # convert to bytes
jz done_hash
jz .Ldone_hash
add INP, NUM_BLKS
mov NUM_BLKS, _INP_END(%rsp) # pointer to end of data
@@ -387,7 +387,7 @@ SYM_TYPED_FUNC_START(sha256_transform_ssse3)
movdqa _SHUF_00BA(%rip), SHUF_00BA
movdqa _SHUF_DC00(%rip), SHUF_DC00
loop0:
.Lloop0:
lea K256(%rip), TBL
## byte swap first 16 dwords
@@ -401,7 +401,7 @@ loop0:
## schedule 48 input dwords, by doing 3 rounds of 16 each
mov $3, SRND
.align 16
loop1:
.Lloop1:
movdqa (TBL), XFER
paddd X0, XFER
movdqa XFER, _XFER(%rsp)
@@ -424,10 +424,10 @@ loop1:
FOUR_ROUNDS_AND_SCHED
sub $1, SRND
jne loop1
jne .Lloop1
mov $2, SRND
loop2:
.Lloop2:
paddd (TBL), X0
movdqa X0, _XFER(%rsp)
DO_ROUND 0
@@ -446,7 +446,7 @@ loop2:
movdqa X3, X1
sub $1, SRND
jne loop2
jne .Lloop2
addm (4*0)(CTX),a
addm (4*1)(CTX),b
@@ -460,9 +460,9 @@ loop2:
mov _INP(%rsp), INP
add $64, INP
cmp _INP_END(%rsp), INP
jne loop0
jne .Lloop0
done_hash:
.Ldone_hash:
mov %rbp, %rsp
popq %rbp


@@ -276,7 +276,7 @@ frame_size = frame_WK + WK_SIZE
########################################################################
SYM_TYPED_FUNC_START(sha512_transform_avx)
test msglen, msglen
je nowork
je .Lnowork
# Save GPRs
push %rbx
@@ -291,7 +291,7 @@ SYM_TYPED_FUNC_START(sha512_transform_avx)
sub $frame_size, %rsp
and $~(0x20 - 1), %rsp
updateblock:
.Lupdateblock:
# Load state variables
mov DIGEST(0), a_64
@@ -348,7 +348,7 @@ updateblock:
# Advance to next message block
add $16*8, msg
dec msglen
jnz updateblock
jnz .Lupdateblock
# Restore Stack Pointer
mov %rbp, %rsp
@@ -361,7 +361,7 @@ updateblock:
pop %r12
pop %rbx
nowork:
.Lnowork:
RET
SYM_FUNC_END(sha512_transform_avx)


@@ -581,7 +581,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
and $~(0x20 - 1), %rsp
shl $7, NUM_BLKS # convert to bytes
jz done_hash
jz .Ldone_hash
add INP, NUM_BLKS # pointer to end of data
mov NUM_BLKS, frame_INPEND(%rsp)
@@ -600,7 +600,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
loop0:
.Lloop0:
lea K512(%rip), TBL
## byte swap first 16 dwords
@@ -615,7 +615,7 @@ loop0:
movq $4, frame_SRND(%rsp)
.align 16
loop1:
.Lloop1:
vpaddq (TBL), Y_0, XFER
vmovdqa XFER, frame_XFER(%rsp)
FOUR_ROUNDS_AND_SCHED
@@ -634,10 +634,10 @@ loop1:
FOUR_ROUNDS_AND_SCHED
subq $1, frame_SRND(%rsp)
jne loop1
jne .Lloop1
movq $2, frame_SRND(%rsp)
loop2:
.Lloop2:
vpaddq (TBL), Y_0, XFER
vmovdqa XFER, frame_XFER(%rsp)
DO_4ROUNDS
@@ -650,7 +650,7 @@ loop2:
vmovdqa Y_3, Y_1
subq $1, frame_SRND(%rsp)
jne loop2
jne .Lloop2
mov frame_CTX(%rsp), CTX2
addm 8*0(CTX2), a
@@ -665,9 +665,9 @@ loop2:
mov frame_INP(%rsp), INP
add $128, INP
cmp frame_INPEND(%rsp), INP
jne loop0
jne .Lloop0
done_hash:
.Ldone_hash:
# Restore Stack Pointer
mov %rbp, %rsp


@@ -278,7 +278,7 @@ frame_size = frame_WK + WK_SIZE
SYM_TYPED_FUNC_START(sha512_transform_ssse3)
test msglen, msglen
je nowork
je .Lnowork
# Save GPRs
push %rbx
@@ -293,7 +293,7 @@ SYM_TYPED_FUNC_START(sha512_transform_ssse3)
sub $frame_size, %rsp
and $~(0x20 - 1), %rsp
updateblock:
.Lupdateblock:
# Load state variables
mov DIGEST(0), a_64
@@ -350,7 +350,7 @@ updateblock:
# Advance to next message block
add $16*8, msg
dec msglen
jnz updateblock
jnz .Lupdateblock
# Restore Stack Pointer
mov %rbp, %rsp
@@ -363,7 +363,7 @@ updateblock:
pop %r12
pop %rbx
nowork:
.Lnowork:
RET
SYM_FUNC_END(sha512_transform_ssse3)


@@ -12,6 +12,7 @@
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>


@@ -6,25 +6,35 @@
* Authors: Weigang Li <weigang.li@intel.com>
* Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
#include <crypto/internal/acompress.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"
#include "compress.h"
struct crypto_scomp;
static const struct crypto_type crypto_acomp_type;
#ifdef CONFIG_NET
static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
return container_of(alg, struct acomp_alg, calg.base);
}
static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}
static int __maybe_unused crypto_acomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
@@ -34,12 +44,6 @@ static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
#else
static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@ -89,13 +93,44 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
return extsize;
}
static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
struct crypto_alg *alg)
{
struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
struct crypto_istat_compress *istat = comp_get_stat(calg);
struct crypto_stat_compress racomp;
memset(&racomp, 0, sizeof(racomp));
strscpy(racomp.type, "acomp", sizeof(racomp.type));
racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
}
#ifdef CONFIG_CRYPTO_STATS
int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
{
return __crypto_acomp_report_stat(skb, alg);
}
#endif
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_acomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_acomp_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
@ -147,12 +182,24 @@ void acomp_request_free(struct acomp_req *req)
}
EXPORT_SYMBOL_GPL(acomp_request_free);
int crypto_register_acomp(struct acomp_alg *alg)
void comp_prepare_alg(struct comp_alg_common *alg)
{
struct crypto_istat_compress *istat = comp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_acomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
}
int crypto_register_acomp(struct acomp_alg *alg)
{
struct crypto_alg *base = &alg->calg.base;
comp_prepare_alg(&alg->calg);
base->cra_type = &crypto_acomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
return crypto_register_alg(base);

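A pattern repeated across the type files in this series shows up here for the first time: the #ifdef CONFIG_NET report function and its #else -ENOSYS stub collapse into a single __maybe_unused definition that the crypto_type table references only under #ifdef CONFIG_CRYPTO_USER, letting the compiler drop the body when it is unreferenced. A condensed sketch with hypothetical names:

#include <crypto/algapi.h>
#include <linux/compiler.h>
#include <linux/skbuff.h>

/* hypothetical reporter; __maybe_unused keeps -Wunused-function quiet
 * when the only reference below is compiled out */
static int __maybe_unused demo_report(struct sk_buff *skb,
				      struct crypto_alg *alg)
{
	return 0;
}

static const struct crypto_type demo_type __maybe_unused = {
#ifdef CONFIG_CRYPTO_USER
	.report	= demo_report,	/* only referenced when CRYPTO_USER=y */
#endif
};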

@ -8,17 +8,27 @@
*/
#include <crypto/internal/aead.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
return &alg->stat;
#else
return NULL;
#endif
}
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@ -80,39 +90,62 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
{
if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
return err;
if (err && err != -EINPROGRESS && err != -EBUSY)
atomic64_inc(&istat->err_cnt);
return err;
}
int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_alg *alg = aead->base.__crt_alg;
unsigned int cryptlen = req->cryptlen;
struct aead_alg *alg = crypto_aead_alg(aead);
struct crypto_istat_aead *istat;
int ret;
crypto_stats_get(alg);
istat = aead_get_stat(alg);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
atomic64_inc(&istat->encrypt_cnt);
atomic64_add(req->cryptlen, &istat->encrypt_tlen);
}
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_aead_alg(aead)->encrypt(req);
crypto_stats_aead_encrypt(cryptlen, alg, ret);
return ret;
ret = alg->encrypt(req);
return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_alg *alg = aead->base.__crt_alg;
unsigned int cryptlen = req->cryptlen;
struct aead_alg *alg = crypto_aead_alg(aead);
struct crypto_istat_aead *istat;
int ret;
crypto_stats_get(alg);
istat = aead_get_stat(alg);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
atomic64_inc(&istat->encrypt_cnt);
atomic64_add(req->cryptlen, &istat->encrypt_tlen);
}
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else if (req->cryptlen < crypto_aead_authsize(aead))
ret = -EINVAL;
else
ret = crypto_aead_alg(aead)->decrypt(req);
crypto_stats_aead_decrypt(cryptlen, alg, ret);
return ret;
ret = alg->decrypt(req);
return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
@ -142,8 +175,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
return 0;
}
#ifdef CONFIG_NET
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_aead_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
@ -159,12 +192,6 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
}
#else
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@ -188,6 +215,26 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
aead->free(aead);
}
static int __maybe_unused crypto_aead_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
struct crypto_istat_aead *istat = aead_get_stat(aead);
struct crypto_stat_aead raead;
memset(&raead, 0, sizeof(raead));
strscpy(raead.type, "aead", sizeof(raead.type));
raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
}
static const struct crypto_type crypto_aead_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_aead_init_tfm,
@ -195,7 +242,12 @@ static const struct crypto_type crypto_aead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_aead_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_aead_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
@ -219,6 +271,7 @@ EXPORT_SYMBOL_GPL(crypto_alloc_aead);
static int aead_prepare_alg(struct aead_alg *alg)
{
struct crypto_istat_aead *istat = aead_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@ -232,6 +285,9 @@ static int aead_prepare_alg(struct aead_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
return 0;
}

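The inlined accounting above leans on a compile-time idiom worth noting: with CONFIG_CRYPTO_STATS=n, aead_get_stat() returns NULL, but IS_ENABLED() folds the counting branches away as dead code before the pointer could ever be dereferenced. A generic restatement of the helper's shape (hypothetical names, mirroring crypto_aead_errstat() in the hunk above):

#include <linux/atomic.h>
#include <linux/errno.h>

struct demo_istat {
	atomic64_t err_cnt;
};

/* istat may be NULL when CONFIG_CRYPTO_STATS=n: IS_ENABLED() is a
 * compile-time constant, so the dereference is dead code in that case */
static inline int demo_errstat(struct demo_istat *istat, int err)
{
	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&istat->err_cnt);

	return err;
}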

@ -8,19 +8,18 @@
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*/
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
#include "hash.h"
static const struct crypto_type crypto_ahash_type;
@ -296,55 +295,60 @@ static int crypto_ahash_op(struct ahash_request *req,
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int err;
if ((unsigned long)req->result & alignmask)
return ahash_op_unaligned(req, op, has_state);
err = ahash_op_unaligned(req, op, has_state);
else
err = op(req);
return op(req);
return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
}
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct crypto_alg *alg = tfm->base.__crt_alg;
unsigned int nbytes = req->nbytes;
int ret;
struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
crypto_stats_get(alg);
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final, true);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_inc(&hash_get_stat(alg)->hash_cnt);
return crypto_ahash_op(req, tfm->final, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct crypto_alg *alg = tfm->base.__crt_alg;
unsigned int nbytes = req->nbytes;
int ret;
struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
crypto_stats_get(alg);
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup, true);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = hash_get_stat(alg);
atomic64_inc(&istat->hash_cnt);
atomic64_add(req->nbytes, &istat->hash_tlen);
}
return crypto_ahash_op(req, tfm->finup, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct crypto_alg *alg = tfm->base.__crt_alg;
unsigned int nbytes = req->nbytes;
int ret;
struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = hash_get_stat(alg);
atomic64_inc(&istat->hash_cnt);
atomic64_add(req->nbytes, &istat->hash_tlen);
}
crypto_stats_get(alg);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_ahash_op(req, tfm->digest, false);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
return crypto_hash_errstat(alg, -ENOKEY);
return crypto_ahash_op(req, tfm->digest, false);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@ -465,8 +469,8 @@ static void crypto_ahash_free_instance(struct crypto_instance *inst)
ahash->free(ahash);
}
#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_ahash_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
@ -479,12 +483,6 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@ -498,6 +496,12 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__crypto_hash_alg_common(alg)->digestsize);
}
static int __maybe_unused crypto_ahash_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
return crypto_hash_report_stat(skb, alg, "ahash");
}
static const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
@ -505,7 +509,12 @@ static const struct crypto_type crypto_ahash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_ahash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_ahash_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
@ -534,17 +543,70 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
struct hash_alg_common *halg = crypto_hash_alg_common(hash);
struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
struct crypto_ahash *nhash;
struct ahash_alg *alg;
int err;
if (!crypto_hash_alg_has_setkey(halg)) {
tfm = crypto_tfm_get(tfm);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
return hash;
}
nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);
if (IS_ERR(nhash))
return nhash;
nhash->init = hash->init;
nhash->update = hash->update;
nhash->final = hash->final;
nhash->finup = hash->finup;
nhash->digest = hash->digest;
nhash->export = hash->export;
nhash->import = hash->import;
nhash->setkey = hash->setkey;
nhash->reqsize = hash->reqsize;
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_clone_shash_ops_async(nhash, hash);
err = -ENOSYS;
alg = crypto_ahash_alg(hash);
if (!alg->clone_tfm)
goto out_free_nhash;
err = alg->clone_tfm(nhash, hash);
if (err)
goto out_free_nhash;
return nhash;
out_free_nhash:
crypto_free_ahash(nhash);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
alg->halg.statesize > HASH_MAX_STATESIZE ||
alg->halg.statesize == 0)
if (alg->halg.statesize == 0)
return -EINVAL;
err = hash_prepare_alg(&alg->halg);
if (err)
return err;
base->cra_type = &crypto_ahash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
return 0;

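crypto_clone_ahash() is the user-visible payoff: a transform can be duplicated without a second setkey(), so callers never need to keep the raw key around. Keyless algorithms are handled by simply taking another tfm reference; keyed ones go through the new clone_tfm hook. A hypothetical caller-side sketch:

#include <crypto/hash.h>
#include <linux/err.h>

/* duplicate an already-keyed MAC so two contexts can run in parallel */
static struct crypto_ahash *demo_fork_mac(struct crypto_ahash *keyed)
{
	struct crypto_ahash *copy = crypto_clone_ahash(keyed);

	return IS_ERR(copy) ? NULL : copy;	/* no second setkey() needed */
}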

@ -5,23 +5,20 @@
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <tadeusz.struk@intel.com>
*/
#include <crypto/internal/akcipher.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include "internal.h"
#ifdef CONFIG_NET
static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_akcipher_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@ -32,12 +29,6 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
#else
static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@ -76,6 +67,30 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
akcipher->free(akcipher);
}
static int __maybe_unused crypto_akcipher_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
struct crypto_istat_akcipher *istat;
struct crypto_stat_akcipher rakcipher;
istat = akcipher_get_stat(akcipher);
memset(&rakcipher, 0, sizeof(rakcipher));
strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
@ -83,7 +98,12 @@ static const struct crypto_type crypto_akcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_akcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_akcipher_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
@ -108,11 +128,15 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
}
static int akcipher_default_op(struct akcipher_request *req)


@ -339,8 +339,6 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
list_add(&alg->cra_list, &crypto_alg_list);
crypto_stats_init(alg);
if (larval) {
/* No cheating! */
alg->cra_flags &= ~CRYPTO_ALG_TESTED;
@ -493,7 +491,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
return;
if (alg->cra_destroy)
alg->cra_destroy(alg);
@ -1038,219 +1038,6 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
}
EXPORT_SYMBOL_GPL(crypto_type_has_alg);
#ifdef CONFIG_CRYPTO_STATS
void crypto_stats_init(struct crypto_alg *alg)
{
memset(&alg->stats, 0, sizeof(alg->stats));
}
EXPORT_SYMBOL_GPL(crypto_stats_init);
void crypto_stats_get(struct crypto_alg *alg)
{
crypto_alg_get(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_get);
void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
int ret)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.aead.err_cnt);
} else {
atomic64_inc(&alg->stats.aead.encrypt_cnt);
atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
int ret)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.aead.err_cnt);
} else {
atomic64_inc(&alg->stats.aead.decrypt_cnt);
atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.akcipher.err_cnt);
} else {
atomic64_inc(&alg->stats.akcipher.encrypt_cnt);
atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.akcipher.err_cnt);
} else {
atomic64_inc(&alg->stats.akcipher.decrypt_cnt);
atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
atomic64_inc(&alg->stats.akcipher.err_cnt);
else
atomic64_inc(&alg->stats.akcipher.sign_cnt);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
atomic64_inc(&alg->stats.akcipher.err_cnt);
else
atomic64_inc(&alg->stats.akcipher.verify_cnt);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.compress.err_cnt);
} else {
atomic64_inc(&alg->stats.compress.compress_cnt);
atomic64_add(slen, &alg->stats.compress.compress_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_compress);
void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.compress.err_cnt);
} else {
atomic64_inc(&alg->stats.compress.decompress_cnt);
atomic64_add(slen, &alg->stats.compress.decompress_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_decompress);
void crypto_stats_ahash_update(unsigned int nbytes, int ret,
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
atomic64_inc(&alg->stats.hash.err_cnt);
else
atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
void crypto_stats_ahash_final(unsigned int nbytes, int ret,
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.hash.err_cnt);
} else {
atomic64_inc(&alg->stats.hash.hash_cnt);
atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);
void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
{
if (ret)
atomic64_inc(&alg->stats.kpp.err_cnt);
else
atomic64_inc(&alg->stats.kpp.setsecret_cnt);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret);
void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
{
if (ret)
atomic64_inc(&alg->stats.kpp.err_cnt);
else
atomic64_inc(&alg->stats.kpp.generate_public_key_cnt);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key);
void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
{
if (ret)
atomic64_inc(&alg->stats.kpp.err_cnt);
else
atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret);
void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
atomic64_inc(&alg->stats.rng.err_cnt);
else
atomic64_inc(&alg->stats.rng.seed_cnt);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_rng_seed);
void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen,
int ret)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.rng.err_cnt);
} else {
atomic64_inc(&alg->stats.rng.generate_cnt);
atomic64_add(dlen, &alg->stats.rng.generate_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_rng_generate);
void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret,
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.cipher.err_cnt);
} else {
atomic64_inc(&alg->stats.cipher.encrypt_cnt);
atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt);
void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic64_inc(&alg->stats.cipher.err_cnt);
} else {
atomic64_inc(&alg->stats.cipher.decrypt_cnt);
atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen);
}
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
#endif
static void __init crypto_start_tests(void)
{
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))

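Besides deleting the crypto_stats_* wrappers (their work moved into the per-type files shown earlier), this hunk demotes the unregister-time refcount check from BUG_ON to WARN_ON: a leaked reference now produces a backtrace and leaks the algorithm instead of halting the machine. The pattern in isolation, with hypothetical names:

#include <linux/bug.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
	refcount_t refcnt;
};

static void demo_destroy(struct demo_obj *obj)
{
	/* demoted check: print a backtrace and leak the object rather
	 * than BUG_ON() taking the whole machine down */
	if (WARN_ON(refcount_read(&obj->refcnt) != 1))
		return;

	kfree(obj);
}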

@ -235,24 +235,31 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
char state[HASH_MAX_STATESIZE];
struct crypto_ahash *tfm;
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
char *state;
bool more;
int err;
tfm = crypto_ahash_reqtfm(req);
state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
err = -ENOMEM;
if (!state)
goto out;
lock_sock(sk);
more = ctx->more;
err = more ? crypto_ahash_export(req, state) : 0;
release_sock(sk);
if (err)
return err;
goto out_free_state;
err = af_alg_accept(ask->parent, newsock, kern);
if (err)
return err;
goto out_free_state;
sk2 = newsock->sk;
ask2 = alg_sk(sk2);
@ -260,7 +267,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
ctx2->more = more;
if (!more)
return err;
goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
@ -268,6 +275,10 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
sock_put(sk2);
}
out_free_state:
kfree_sensitive(state);
out:
return err;
}

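The on-stack state[HASH_MAX_STATESIZE] buffer becomes a kmalloc'd one sized by crypto_ahash_statesize(), which is what makes removing the global statesize cap safe. A self-contained sketch of the same export/import round trip (hypothetical helper, error handling trimmed):

#include <crypto/hash.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_copy_hash_state(struct ahash_request *src,
				struct ahash_request *dst)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(src);
	void *state;
	int err;

	/* statesize is no longer bounded, so the buffer must live on the heap */
	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_ahash_export(src, state);
	if (!err)
		err = crypto_ahash_import(dst, state);

	kfree_sensitive(state);	/* exported state may hold key material */
	return err;
}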

@ -408,6 +408,7 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
goto out_err;
tfm->__crt_alg = alg;
refcount_set(&tfm->refcnt, 1);
err = crypto_init_ops(tfm, type, mask);
if (err)
@ -487,26 +488,43 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend,
int node)
static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
const struct crypto_type *frontend, int node,
gfp_t gfp)
{
char *mem;
struct crypto_tfm *tfm = NULL;
struct crypto_tfm *tfm;
unsigned int tfmsize;
unsigned int total;
int err = -ENOMEM;
char *mem;
tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
mem = kzalloc_node(total, GFP_KERNEL, node);
mem = kzalloc_node(total, gfp, node);
if (mem == NULL)
goto out_err;
return ERR_PTR(-ENOMEM);
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
tfm->node = node;
refcount_set(&tfm->refcnt, 1);
return mem;
}
void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend,
int node)
{
struct crypto_tfm *tfm;
char *mem;
int err;
mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
if (IS_ERR(mem))
goto out;
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
err = frontend->init_tfm(tfm);
if (err)
@ -523,13 +541,38 @@ out_free_tfm:
if (err == -EAGAIN)
crypto_shoot_alg(alg);
kfree(mem);
out_err:
mem = ERR_PTR(err);
out:
return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm)
{
struct crypto_alg *alg = otfm->__crt_alg;
struct crypto_tfm *tfm;
char *mem;
mem = ERR_PTR(-ESTALE);
if (unlikely(!crypto_mod_get(alg)))
goto out;
mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
if (IS_ERR(mem)) {
crypto_mod_put(alg);
goto out;
}
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
tfm->crt_flags = otfm->crt_flags;
tfm->exit = otfm->exit;
out:
return mem;
}
EXPORT_SYMBOL_GPL(crypto_clone_tfm);
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask)
@ -619,6 +662,8 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
if (IS_ERR_OR_NULL(mem))
return;
if (!refcount_dec_and_test(&tfm->refcnt))
return;
alg = tfm->__crt_alg;
if (!tfm->exit && alg->cra_exit)

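tfm objects are now reference counted: allocation sets refcnt to 1, crypto_tfm_get() (see the internal.h hunk further down) takes extra references with inc-not-zero, and crypto_destroy_tfm() tears down only on the final put. A condensed restatement of that lifecycle with hypothetical names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_tfm {
	refcount_t refcnt;	/* set to 1 at allocation */
};

static struct demo_tfm *demo_get(struct demo_tfm *tfm)
{
	/* like crypto_tfm_get(): refuse to resurrect a dying object */
	return refcount_inc_not_zero(&tfm->refcnt) ? tfm : NULL;
}

static void demo_put(struct demo_tfm *tfm)
{
	/* like crypto_destroy_tfm(): only the final put tears down */
	if (refcount_dec_and_test(&tfm->refcnt))
		kfree(tfm);
}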

@ -11,8 +11,8 @@
#include <linux/async_tx.h>
#include <linux/gfp.h>
/**
* pq_scribble_page - space to hold throwaway P or Q buffer for
/*
* struct pq_scribble_page - space to hold throwaway P or Q buffer for
* synchronous gen_syndrome
*/
static struct page *pq_scribble_page;
@ -28,7 +28,7 @@ static struct page *pq_scribble_page;
#define MAX_DISKS 255
/**
/*
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
@ -100,7 +100,7 @@ do_async_gen_syndrome(struct dma_chan *chan,
return tx;
}
/**
/*
* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
*/
static void
@ -281,7 +281,7 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
* @offset: common offset into each block (src and dest) to start transaction
* @offsets: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set


@ -124,7 +124,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
/**
* submit_disposition - flags for routing an incoming operation
* enum submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(async_trigger_callback);
/**
* async_tx_quiesce - ensure tx is complete and freeable upon return
* @tx - transaction to quiesce
* @tx: transaction to quiesce
*/
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{

crypto/compress.h (new file, 26 lines)

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Cryptographic API.
*
* Copyright 2015 LG Electronics Inc.
* Copyright (c) 2016, Intel Corporation
* Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
*/
#ifndef _LOCAL_CRYPTO_COMPRESS_H
#define _LOCAL_CRYPTO_COMPRESS_H
#include "internal.h"
struct acomp_req;
struct comp_alg_common;
struct sk_buff;
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg);
void comp_prepare_alg(struct comp_alg_common *alg);
#endif /* _LOCAL_CRYPTO_COMPRESS_H */


@ -427,12 +427,12 @@ err_free_inst:
return err;
}
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
struct ahash_instance *inst = ahash_alg_instance(tfm);
struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
struct crypto_shash_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *hash;
hash = crypto_spawn_shash(spawn);
@ -440,15 +440,30 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
return PTR_ERR(hash);
ctx->child = hash;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
crypto_ahash_set_reqsize(tfm,
sizeof(struct cryptd_hash_request_ctx) +
crypto_shash_descsize(hash));
return 0;
}
static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
struct crypto_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *hash;
hash = crypto_clone_shash(ctx->child);
if (IS_ERR(hash))
return PTR_ERR(hash);
nctx->child = hash;
return 0;
}
static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
crypto_free_shash(ctx->child);
}
@ -677,8 +692,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.statesize = alg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
inst->alg.init_tfm = cryptd_hash_init_tfm;
inst->alg.clone_tfm = cryptd_hash_clone_tfm;
inst->alg.exit_tfm = cryptd_hash_exit_tfm;
inst->alg.init = cryptd_hash_init_enqueue;
inst->alg.update = cryptd_hash_update_enqueue;

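The cryptd conversion illustrates the modern ahash instance shape: per-tfm setup and teardown move from cra_init/cra_exit on the base algorithm to the typed init_tfm/exit_tfm hooks, and the new clone_tfm duplicates the child shash so crypto_clone_ahash() works through templates. A skeletal, hypothetical example of the hook trio:

#include <crypto/internal/hash.h>
#include <linux/compiler.h>

static int demo_init_tfm(struct crypto_ahash *tfm)
{
	return 0;	/* a real driver would allocate its child here */
}

static int demo_clone_tfm(struct crypto_ahash *dst, struct crypto_ahash *src)
{
	return 0;	/* duplicate per-tfm context from src into dst */
}

static void demo_exit_tfm(struct crypto_ahash *tfm)
{
	/* release whatever init_tfm/clone_tfm acquired */
}

static struct ahash_alg demo_alg __maybe_unused = {
	.init_tfm	= demo_init_tfm,
	.clone_tfm	= demo_clone_tfm,
	.exit_tfm	= demo_exit_tfm,
	/* .init/.update/.final/.setkey etc. omitted from this sketch */
};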

@ -6,18 +6,14 @@
*
*/
#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <linux/sched.h>
#include <crypto/algapi.h>
#include <crypto/internal/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/cryptouser.h>
#include "internal.h"
#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
@ -28,23 +24,6 @@ struct crypto_dump_info {
u16 nlmsg_flags;
};
static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_aead raead;
memset(&raead, 0, sizeof(raead));
strscpy(raead.type, "aead", sizeof(raead.type));
raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt);
raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
}
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_cipher rcipher;
@ -53,12 +32,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt);
rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen);
rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt);
rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen);
rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
@ -69,112 +42,10 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
memset(&rcomp, 0, sizeof(rcomp));
strscpy(rcomp.type, "compression", sizeof(rcomp.type));
rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
}
static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_compress racomp;
memset(&racomp, 0, sizeof(racomp));
strscpy(racomp.type, "acomp", sizeof(racomp.type));
racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
}
static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_akcipher rakcipher;
memset(&rakcipher, 0, sizeof(rakcipher));
strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt);
rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen);
rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt);
rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen);
rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt);
rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt);
rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_kpp rkpp;
memset(&rkpp, 0, sizeof(rkpp));
strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt);
rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt);
rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt);
rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
}
static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_hash rhash;
memset(&rhash, 0, sizeof(rhash));
strscpy(rhash.type, "ahash", sizeof(rhash.type));
rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
}
static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_hash rhash;
memset(&rhash, 0, sizeof(rhash));
strscpy(rhash.type, "shash", sizeof(rhash.type));
rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
}
static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_rng rrng;
memset(&rrng, 0, sizeof(rrng));
strscpy(rrng.type, "rng", sizeof(rrng.type));
rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt);
rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen);
rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt);
rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
}
static int crypto_reportstat_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg,
struct sk_buff *skb)
@ -204,15 +75,13 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
goto out;
}
if (alg->cra_type && alg->cra_type->report_stat) {
if (alg->cra_type->report_stat(skb, alg))
goto nla_put_failure;
goto out;
}
switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
case CRYPTO_ALG_TYPE_AEAD:
if (crypto_report_aead(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_SKCIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
@ -221,34 +90,6 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
if (crypto_report_comp(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_ACOMPRESS:
if (crypto_report_acomp(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_SCOMPRESS:
if (crypto_report_acomp(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_AKCIPHER:
if (crypto_report_akcipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_KPP:
if (crypto_report_kpp(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_AHASH:
if (crypto_report_ahash(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_HASH:
if (crypto_report_shash(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_RNG:
if (crypto_report_rng(skb, alg))
goto nla_put_failure;
break;
default:
pr_err("ERROR: Unhandled alg %d in %s\n",
alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),


@ -1546,7 +1546,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
const int err = PTR_ERR(drbg->jent);
drbg->jent = NULL;
if (fips_enabled || err != -ENOENT)
if (fips_enabled)
return err;
pr_info("DRBG: Continuing without Jitter RNG\n");
}

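The drbg change above widens the fallback: previously only -ENOENT from the jitterentropy allocation was tolerated outside FIPS mode, now any error is, while fips_enabled still propagates the failure. A condensed restatement of the resulting rule (hypothetical helper, not the kernel's code):

#include <linux/err.h>
#include <linux/fips.h>
#include <linux/printk.h>

static int demo_tolerate_jent(void *jent)
{
	if (IS_ERR(jent)) {
		if (fips_enabled)
			return PTR_ERR(jent);	/* FIPS: jitter source is mandatory */

		pr_info("DRBG: Continuing without Jitter RNG\n");
	}

	return 0;
}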

@ -66,20 +66,11 @@ static struct ctl_table crypto_sysctl_table[] = {
{}
};
static struct ctl_table crypto_dir_table[] = {
{
.procname = "crypto",
.mode = 0555,
.child = crypto_sysctl_table
},
{}
};
static struct ctl_table_header *crypto_sysctls;
static void crypto_proc_fips_init(void)
{
crypto_sysctls = register_sysctl_table(crypto_dir_table);
crypto_sysctls = register_sysctl("crypto", crypto_sysctl_table);
}
static void crypto_proc_fips_exit(void)
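register_sysctl() takes the parent directory as a path string, so the one-entry crypto_dir_table wrapper becomes dead weight and is dropped. A minimal sketch of the newer registration style (hypothetical table and names):

#include <linux/sysctl.h>

static int demo_value;

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_value",
		.data		= &demo_value,
		.maxlen		= sizeof(demo_value),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};

static void demo_sysctl_init(void)
{
	/* parent directory is named by path; no wrapper table needed */
	register_sysctl("crypto", demo_table);
}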

crypto/hash.h (new file, 40 lines)

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Cryptographic API.
*
* Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
*/
#ifndef _LOCAL_CRYPTO_HASH_H
#define _LOCAL_CRYPTO_HASH_H
#include <crypto/internal/hash.h>
#include <linux/cryptouser.h>
#include "internal.h"
static inline int crypto_hash_report_stat(struct sk_buff *skb,
struct crypto_alg *alg,
const char *type)
{
struct hash_alg_common *halg = __crypto_hash_alg_common(alg);
struct crypto_istat_hash *istat = hash_get_stat(halg);
struct crypto_stat_hash rhash;
memset(&rhash, 0, sizeof(rhash));
strscpy(rhash.type, type, sizeof(rhash.type));
rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt);
rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen);
rhash.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
}
int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
struct crypto_ahash *hash);
int hash_prepare_alg(struct hash_alg_common *alg);
#endif /* _LOCAL_CRYPTO_HASH_H */


@ -160,6 +160,20 @@ static int hmac_init_tfm(struct crypto_shash *parent)
return 0;
}
static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
{
struct hmac_ctx *sctx = hmac_ctx(src);
struct hmac_ctx *dctx = hmac_ctx(dst);
struct crypto_shash *hash;
hash = crypto_clone_shash(sctx->hash);
if (IS_ERR(hash))
return PTR_ERR(hash);
dctx->hash = hash;
return 0;
}
static void hmac_exit_tfm(struct crypto_shash *parent)
{
struct hmac_ctx *ctx = hmac_ctx(parent);
@ -227,6 +241,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.import = hmac_import;
inst->alg.setkey = hmac_setkey;
inst->alg.init_tfm = hmac_init_tfm;
inst->alg.clone_tfm = hmac_clone_tfm;
inst->alg.exit_tfm = hmac_exit_tfm;
inst->free = shash_free_singlespawn_instance;

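With clone_tfm wired into the hmac template, a keyed HMAC can be forked without the caller ever touching the key again. A one-line caller sketch (hypothetical wrapper):

#include <crypto/hash.h>

/* the clone inherits the derived ipad/opad state, so the caller never
 * needs to hold on to (or re-supply) the raw key */
static struct crypto_shash *demo_fork_hmac(struct crypto_shash *keyed)
{
	return crypto_clone_shash(keyed);
}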

@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/jump_label.h>
#include <linux/list.h>
#include <linux/module.h>
@ -47,6 +48,8 @@ extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
static inline bool crypto_boot_test_finished(void)
{
@ -103,6 +106,8 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend, int node);
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
@ -184,5 +189,10 @@ static inline int crypto_is_test_larval(struct crypto_larval *larval)
return larval->alg.cra_driver_name[0];
}
static inline struct crypto_tfm *crypto_tfm_get(struct crypto_tfm *tfm)
{
return refcount_inc_not_zero(&tfm->refcnt) ? tfm : ERR_PTR(-EOVERFLOW);
}
#endif /* _CRYPTO_INTERNAL_H */


@ -37,6 +37,7 @@
* DAMAGE.
*/
#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@ -59,11 +60,6 @@ void jent_zfree(void *ptr)
kfree_sensitive(ptr);
}
void jent_panic(char *s)
{
panic("%s", s);
}
void jent_memcpy(void *dest, const void *src, unsigned int n)
{
memcpy(dest, src, n);
@ -102,7 +98,6 @@ void jent_get_nstime(__u64 *out)
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
unsigned int reset_cnt;
};
static int jent_kcapi_init(struct crypto_tfm *tfm)
@ -138,32 +133,30 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
spin_lock(&rng->jent_lock);
/* Return a permanent error in case we had too many resets in a row. */
if (rng->reset_cnt > (1<<10)) {
ret = -EFAULT;
goto out;
}
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
/* Reset RNG in case of health failures */
if (ret < -1) {
pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n",
(ret == -2) ? "Repetition Count Test" :
"Adaptive Proportion Test");
rng->reset_cnt++;
if (ret == -3) {
/* Handle permanent health test error */
/*
* If the kernel was booted with fips=1, it implies that
* the entire kernel acts as a FIPS 140 module. In this case
* an SP800-90B permanent health test error is treated as
* a FIPS module error.
*/
if (fips_enabled)
panic("Jitter RNG permanent health test failure\n");
pr_err("Jitter RNG permanent health test failure\n");
ret = -EFAULT;
} else if (ret == -2) {
/* Handle intermittent health test error */
pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n");
ret = -EAGAIN;
} else {
rng->reset_cnt = 0;
/* Convert the Jitter RNG error into a usable error code */
if (ret == -1)
ret = -EINVAL;
} else if (ret == -1) {
/* Handle other errors */
ret = -EINVAL;
}
out:
spin_unlock(&rng->jent_lock);
return ret;
@ -197,6 +190,10 @@ static int __init jent_mod_init(void)
ret = jent_entropy_init();
if (ret) {
/* Handle permanent health test error */
if (fips_enabled)
panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
return -EFAULT;
}


@ -85,10 +85,14 @@ struct rand_data {
* bit generation */
/* Repetition Count Test */
int rct_count; /* Number of stuck values */
unsigned int rct_count; /* Number of stuck values */
/* Adaptive Proportion Test for a significance level of 2^-30 */
/* Intermittent health test failure threshold of 2^-30 */
#define JENT_RCT_CUTOFF 30 /* Taken from SP800-90B sec 4.4.1 */
#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
/* Permanent health test failure threshold of 2^-60 */
#define JENT_RCT_CUTOFF_PERMANENT 60
#define JENT_APT_CUTOFF_PERMANENT 355
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
#define JENT_APT_LSB 16
@ -97,8 +101,6 @@ struct rand_data {
unsigned int apt_count; /* APT counter */
unsigned int apt_base; /* APT base reference */
unsigned int apt_base_set:1; /* APT base reference set? */
unsigned int health_failure:1; /* Permanent health failure */
};
/* Flags that can be used to initialize the RNG */
@ -169,19 +171,26 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
return;
}
if (delta_masked == ec->apt_base) {
if (delta_masked == ec->apt_base)
ec->apt_count++;
if (ec->apt_count >= JENT_APT_CUTOFF)
ec->health_failure = 1;
}
ec->apt_observations++;
if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
jent_apt_reset(ec, delta_masked);
}
/* APT health test failure detection */
static int jent_apt_permanent_failure(struct rand_data *ec)
{
return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0;
}
static int jent_apt_failure(struct rand_data *ec)
{
return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0;
}
/***************************************************************************
* Stuck Test and its use as Repetition Count Test
*
@ -206,55 +215,14 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
*/
static void jent_rct_insert(struct rand_data *ec, int stuck)
{
/*
* If we have a count less than zero, a previous RCT round identified
* a failure. We will not overwrite it.
*/
if (ec->rct_count < 0)
return;
if (stuck) {
ec->rct_count++;
/*
* The cutoff value is based on the following consideration:
* alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
* In addition, we require an entropy value H of 1/OSR as this
* is the minimum entropy required to provide full entropy.
* Note, we collect 64 * OSR deltas for inserting them into
* the entropy pool which should then have (close to) 64 bits
* of entropy.
*
* Note, ec->rct_count (which equals to value B in the pseudo
* code of SP800-90B section 4.4.1) starts with zero. Hence
* we need to subtract one from the cutoff value as calculated
* following SP800-90B.
*/
if ((unsigned int)ec->rct_count >= (31 * ec->osr)) {
ec->rct_count = -1;
ec->health_failure = 1;
}
} else {
/* Reset RCT */
ec->rct_count = 0;
}
}
/*
* Is there an RCT health test failure?
*
* @ec [in] Reference to entropy collector
*
* @return
* 0 No health test failure
* 1 Permanent health test failure
*/
static int jent_rct_failure(struct rand_data *ec)
{
if (ec->rct_count < 0)
return 1;
return 0;
}
static inline __u64 jent_delta(__u64 prev, __u64 next)
{
#define JENT_UINT64_MAX (__u64)(~((__u64) 0))
@ -303,18 +271,26 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
return 0;
}
/*
* Report any health test failures
*
* @ec [in] Reference to entropy collector
*
* @return
* 0 No health test failure
* 1 Permanent health test failure
*/
/* RCT health test failure detection */
static int jent_rct_permanent_failure(struct rand_data *ec)
{
return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0;
}
static int jent_rct_failure(struct rand_data *ec)
{
return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0;
}
/* Report of health test failures */
static int jent_health_failure(struct rand_data *ec)
{
return ec->health_failure;
return jent_rct_failure(ec) | jent_apt_failure(ec);
}
static int jent_permanent_health_failure(struct rand_data *ec)
{
return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec);
}
/***************************************************************************
@ -600,8 +576,8 @@ static void jent_gen_entropy(struct rand_data *ec)
*
* The following error codes can occur:
* -1 entropy_collector is NULL
* -2 RCT failed
* -3 APT test failed
* -2 Intermittent health failure
* -3 Permanent health failure
*/
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len)
@ -616,39 +592,23 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
jent_gen_entropy(ec);
if (jent_health_failure(ec)) {
int ret;
if (jent_rct_failure(ec))
ret = -2;
else
ret = -3;
if (jent_permanent_health_failure(ec)) {
/*
* Re-initialize the noise source
*
* If the health test fails, the Jitter RNG remains
* in failure state and will return a health failure
* during next invocation.
* At this point, the Jitter RNG instance is considered
* as a failed instance. There is no rerun of the
* startup test any more, because the caller
* is assumed to not further use this instance.
*/
return -3;
} else if (jent_health_failure(ec)) {
/*
* Perform startup health tests and return permanent
* error if it fails.
*/
if (jent_entropy_init())
return ret;
return -3;
/* Set APT to initial state */
jent_apt_reset(ec, 0);
ec->apt_base_set = 0;
/* Set RCT to initial state */
ec->rct_count = 0;
/* Re-enable Jitter RNG */
ec->health_failure = 0;
/*
* Return the health test failure status to the
* caller as the generated value is not appropriate.
*/
return ret;
return -2;
}
if ((DATA_SIZE_BITS / 8) < len)

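The rework replaces the sticky health_failure bit with two thresholds per test: crossing the 2^-30 cutoff (RCT 30, APT 325) is an intermittent failure, reported as -2 and answered by re-running the startup tests; crossing the 2^-60 cutoff (RCT 60, APT 355) is permanent, reported as -3, retires the instance, and panics under fips=1. The RCT side of that policy, restated as a condensed classifier (constants copied from the hunk, helper name hypothetical):

static int demo_rct_classify(unsigned int rct_count)
{
	if (rct_count >= 60)	/* JENT_RCT_CUTOFF_PERMANENT, 2^-60 */
		return -3;	/* permanent: retire the instance */
	if (rct_count >= 30)	/* JENT_RCT_CUTOFF, 2^-30 */
		return -2;	/* intermittent: reset and re-test */
	return 0;
}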

@ -2,7 +2,6 @@
extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);
extern void jent_panic(char *s);
extern void jent_memcpy(void *dest, const void *src, unsigned int n);
extern void jent_get_nstime(__u64 *out);


@ -5,23 +5,20 @@
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*/
#include <crypto/internal/kpp.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include "internal.h"
#ifdef CONFIG_NET
static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_kpp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_kpp rkpp;
@ -31,12 +28,6 @@ static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
}
#else
static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@ -75,6 +66,29 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst)
kpp->free(kpp);
}
static int __maybe_unused crypto_kpp_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct kpp_alg *kpp = __crypto_kpp_alg(alg);
struct crypto_istat_kpp *istat;
struct crypto_stat_kpp rkpp;
istat = kpp_get_stat(kpp);
memset(&rkpp, 0, sizeof(rkpp));
strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
rkpp.stat_generate_public_key_cnt =
atomic64_read(&istat->generate_public_key_cnt);
rkpp.stat_compute_shared_secret_cnt =
atomic64_read(&istat->compute_shared_secret_cnt);
rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
}
static const struct crypto_type crypto_kpp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_kpp_init_tfm,
@ -82,7 +96,12 @@ static const struct crypto_type crypto_kpp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_kpp_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_kpp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_kpp_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
@ -112,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp);
static void kpp_prepare_alg(struct kpp_alg *alg)
{
struct crypto_istat_kpp *istat = kpp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_kpp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
}
int crypto_register_kpp(struct kpp_alg *alg)


@ -8,17 +8,17 @@
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*/
#include <linux/atomic.h>
#include <crypto/internal/rng.h>
#include <linux/atomic.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>
#include "internal.h"
@ -30,27 +30,30 @@ static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
struct crypto_alg *alg = tfm->base.__crt_alg;
struct rng_alg *alg = crypto_rng_alg(tfm);
u8 *buf = NULL;
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_inc(&rng_get_stat(alg)->seed_cnt);
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
err = -ENOMEM;
if (!buf)
return -ENOMEM;
goto out;
err = get_random_bytes_wait(buf, slen);
if (err)
goto out;
goto free_buf;
seed = buf;
}
crypto_stats_get(alg);
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
crypto_stats_rng_seed(alg, err);
out:
err = alg->seed(tfm, seed, slen);
free_buf:
kfree_sensitive(buf);
return err;
out:
return crypto_rng_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_rng_reset);
@ -66,8 +69,8 @@ static unsigned int seedsize(struct crypto_alg *alg)
return ralg->seedsize;
}
#ifdef CONFIG_NET
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_rng_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
@ -79,12 +82,6 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
}
#else
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@ -94,13 +91,39 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "seedsize : %u\n", seedsize(alg));
}
static int __maybe_unused crypto_rng_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct rng_alg *rng = __crypto_rng_alg(alg);
struct crypto_istat_rng *istat;
struct crypto_stat_rng rrng;
istat = rng_get_stat(rng);
memset(&rrng, 0, sizeof(rrng));
strscpy(rrng.type, "rng", sizeof(rrng.type));
rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
}
static const struct crypto_type crypto_rng_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_rng_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_rng_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_rng_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
@@ -176,6 +199,7 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
int crypto_register_rng(struct rng_alg *alg)
{
struct crypto_istat_rng *istat = rng_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->seedsize > PAGE_SIZE / 8)
@@ -185,6 +209,9 @@ int crypto_register_rng(struct rng_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_rng);
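
A minimal registration sketch for reference; the toy_* names and the
driver behaviour are hypothetical, not from this series. The new istat
reset in crypto_register_rng() means a freshly registered algorithm
starts with zeroed statistics.

#include <crypto/internal/rng.h>
#include <linux/module.h>
#include <linux/string.h>

static int toy_generate(struct crypto_rng *tfm, const u8 *src,
                        unsigned int slen, u8 *dst, unsigned int dlen)
{
        memset(dst, 0, dlen);   /* placeholder output, not random */
        return 0;
}

static int toy_seed(struct crypto_rng *tfm, const u8 *seed,
                    unsigned int slen)
{
        return 0;               /* a real driver would mix this in */
}

static struct rng_alg toy_rng_alg = {
        .generate = toy_generate,
        .seed     = toy_seed,
        .seedsize = 16,         /* must stay <= PAGE_SIZE / 8, per above */
        .base     = {
                .cra_name        = "toy_rng",
                .cra_driver_name = "toy_rng-sketch",
                .cra_priority    = 100,
                .cra_module      = THIS_MODULE,
        },
};

/* Paired with crypto_register_rng(&toy_rng_alg) at init and
 * crypto_unregister_rng(&toy_rng_alg) at exit. */
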

diff --git a/crypto/scompress.c b/crypto/scompress.c

@@ -6,23 +6,22 @@
* Copyright (c) 2016, Intel Corporation
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
#include <linux/errno.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"
#include "compress.h"
struct scomp_scratch {
spinlock_t lock;
@@ -38,8 +37,8 @@ static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_scomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rscomp;
@@ -50,12 +49,6 @@ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(rscomp), &rscomp);
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -247,7 +240,12 @@ static const struct crypto_type crypto_scomp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_scomp_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_scomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_acomp_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
@@ -256,10 +254,11 @@ static const struct crypto_type crypto_scomp_type = {
int crypto_register_scomp(struct scomp_alg *alg)
{
struct crypto_alg *base = &alg->base;
struct crypto_alg *base = &alg->calg.base;
comp_prepare_alg(&alg->calg);
base->cra_type = &crypto_scomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
return crypto_register_alg(base);

diff --git a/crypto/shash.c b/crypto/shash.c

@@ -6,22 +6,31 @@
*/
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/string.h>
#include <net/netlink.h>
#include <linux/compiler.h>
#include "internal.h"
#include "hash.h"
#define MAX_SHASH_ALIGNMASK 63
static const struct crypto_type crypto_shash_type;
static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
{
return hash_get_stat(&alg->halg);
}
static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
{
return crypto_hash_errstat(&alg->halg, err);
}
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -114,11 +123,17 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
if ((unsigned long)data & alignmask)
return shash_update_unaligned(desc, data, len);
err = shash_update_unaligned(desc, data, len);
else
err = shash->update(desc, data, len);
return shash->update(desc, data, len);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);
@@ -155,19 +170,25 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out)
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_inc(&shash_get_stat(shash)->hash_cnt);
if ((unsigned long)out & alignmask)
return shash_final_unaligned(desc, out);
err = shash_final_unaligned(desc, out);
else
err = shash->final(desc, out);
return shash->final(desc, out);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);
static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return crypto_shash_update(desc, data, len) ?:
crypto_shash_final(desc, out);
return shash_update_unaligned(desc, data, len) ?:
shash_final_unaligned(desc, out);
}
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
@@ -176,11 +197,22 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = shash_get_stat(shash);
atomic64_inc(&istat->hash_cnt);
atomic64_add(len, &istat->hash_tlen);
}
if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_finup_unaligned(desc, data, len, out);
err = shash_finup_unaligned(desc, data, len, out);
else
err = shash->finup(desc, data, len, out);
return shash->finup(desc, data, len, out);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
@@ -188,7 +220,8 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return crypto_shash_init(desc) ?:
crypto_shash_finup(desc, data, len, out);
shash_update_unaligned(desc, data, len) ?:
shash_final_unaligned(desc, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -197,14 +230,23 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = shash_get_stat(shash);
atomic64_inc(&istat->hash_cnt);
atomic64_add(len, &istat->hash_tlen);
}
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
err = -ENOKEY;
else if (((unsigned long)data | (unsigned long)out) & alignmask)
err = shash_digest_unaligned(desc, data, len, out);
else
err = shash->digest(desc, data, len, out);
if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);
return shash->digest(desc, data, len, out);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
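
The one-shot digest path above is usually reached through a caller like
the following sketch; sha256 and the wrapper function are chosen only as
an example and are not part of this patch.

#include <crypto/hash.h>
#include <crypto/sha2.h>

static int example_sha256(const u8 *data, unsigned int len,
                          u8 out[SHA256_DIGEST_SIZE])
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Wraps crypto_shash_digest() with an on-stack descriptor. */
        err = crypto_shash_tfm_digest(tfm, data, len, out);

        crypto_free_shash(tfm);
        return err;
}
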
@@ -403,6 +445,24 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
return 0;
}
struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
struct crypto_ahash *hash)
{
struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
struct crypto_shash **ctx = crypto_ahash_ctx(hash);
struct crypto_shash *shash;
shash = crypto_clone_shash(*ctx);
if (IS_ERR(shash)) {
crypto_free_ahash(nhash);
return ERR_CAST(shash);
}
*nctx = shash;
return nhash;
}
static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
@@ -448,8 +508,8 @@ static void crypto_shash_free_instance(struct crypto_instance *inst)
shash->free(shash);
}
#ifdef CONFIG_NET
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_shash_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
@@ -463,12 +523,6 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -481,6 +535,12 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "digestsize : %u\n", salg->digestsize);
}
static int __maybe_unused crypto_shash_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
return crypto_hash_report_stat(skb, alg, "shash");
}
static const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
@@ -488,7 +548,12 @@ static const struct crypto_type crypto_shash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_shash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_shash_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
@@ -517,13 +582,62 @@ int crypto_has_shash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_shash);
static int shash_prepare_alg(struct shash_alg *alg)
struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
{
struct crypto_tfm *tfm = crypto_shash_tfm(hash);
struct shash_alg *alg = crypto_shash_alg(hash);
struct crypto_shash *nhash;
int err;
if (!crypto_shash_alg_has_setkey(alg)) {
tfm = crypto_tfm_get(tfm);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
return hash;
}
if (!alg->clone_tfm)
return ERR_PTR(-ENOSYS);
nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
if (IS_ERR(nhash))
return nhash;
nhash->descsize = hash->descsize;
err = alg->clone_tfm(nhash, hash);
if (err) {
crypto_free_shash(nhash);
return ERR_PTR(err);
}
return nhash;
}
EXPORT_SYMBOL_GPL(crypto_clone_shash);
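
A usage sketch for the new clone API; the key and algorithm name are
illustrative assumptions. As the code above shows, unkeyed algorithms
are cloned by taking a reference alone, while keyed ones need a
->clone_tfm callback (hmac gains one in this series).

#include <crypto/hash.h>

static int example_clone_hmac(void)
{
        static const u8 key[16] = { 0x0b, };    /* illustrative key */
        struct crypto_shash *tfm, *clone;
        int err;

        tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_shash_setkey(tfm, key, sizeof(key));
        if (err)
                goto out;

        /* Fails with -ENOSYS if the algorithm lacks ->clone_tfm. */
        clone = crypto_clone_shash(tfm);
        if (IS_ERR(clone)) {
                err = PTR_ERR(clone);
                goto out;
        }

        /* 'clone' now hashes independently with the same key. */
        crypto_free_shash(clone);
out:
        crypto_free_shash(tfm);
        return err;
}
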
int hash_prepare_alg(struct hash_alg_common *alg)
{
struct crypto_istat_hash *istat = hash_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
alg->descsize > HASH_MAX_DESCSIZE ||
alg->statesize > HASH_MAX_STATESIZE)
if (alg->digestsize > HASH_MAX_DIGESTSIZE)
return -EINVAL;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
return 0;
}
static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
if (alg->descsize > HASH_MAX_DESCSIZE)
return -EINVAL;
if (base->cra_alignmask > MAX_SHASH_ALIGNMASK)
@@ -532,8 +646,11 @@ static int shash_prepare_alg(struct shash_alg *alg)
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
err = hash_prepare_alg(&alg->halg);
if (err)
return err;
base->cra_type = &crypto_shash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
if (!alg->finup)
@@ -543,7 +660,7 @@ static int shash_prepare_alg(struct shash_alg *alg)
if (!alg->export) {
alg->export = shash_default_export;
alg->import = shash_default_import;
alg->statesize = alg->descsize;
alg->halg.statesize = alg->descsize;
}
if (!alg->setkey)
alg->setkey = shash_no_setkey;

diff --git a/crypto/skcipher.c b/crypto/skcipher.c

@@ -15,11 +15,14 @@
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
@@ -77,6 +80,35 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
return container_of(alg, struct skcipher_alg, base);
}
static inline struct crypto_istat_cipher *skcipher_get_stat(
struct skcipher_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
return &alg->stat;
#else
return NULL;
#endif
}
static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
return err;
if (err && err != -EINPROGRESS && err != -EBUSY)
atomic64_inc(&istat->err_cnt);
return err;
}
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@@ -605,34 +637,44 @@ EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_alg *alg = tfm->base.__crt_alg;
unsigned int cryptlen = req->cryptlen;
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
crypto_stats_get(alg);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
atomic64_inc(&istat->encrypt_cnt);
atomic64_add(req->cryptlen, &istat->encrypt_tlen);
}
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_skcipher_alg(tfm)->encrypt(req);
crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
return ret;
ret = alg->encrypt(req);
return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_alg *alg = tfm->base.__crt_alg;
unsigned int cryptlen = req->cryptlen;
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
crypto_stats_get(alg);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
atomic64_inc(&istat->decrypt_cnt);
atomic64_add(req->cryptlen, &istat->decrypt_tlen);
}
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_skcipher_alg(tfm)->decrypt(req);
crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
return ret;
ret = alg->decrypt(req);
return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
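
For reference, the counters above sit on the path every skcipher user
exercises. A typical synchronous caller looks like this sketch; the tfm
is assumed to be allocated elsewhere and the helper name is hypothetical.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_encrypt(struct crypto_skcipher *tfm, u8 *buf,
                           unsigned int len, u8 *iv)
{
        DECLARE_CRYPTO_WAIT(wait);
        struct skcipher_request *req;
        struct scatterlist sg;
        int err;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        /* Enters crypto_skcipher_encrypt() above; encrypt_cnt and
         * encrypt_tlen are bumped before the work is dispatched. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
        return err;
}
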
@@ -672,8 +714,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
base);
struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
@@ -686,12 +727,11 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "walksize : %u\n", skcipher->walksize);
}
#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_skcipher_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_report_blkcipher rblkcipher;
struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
base);
memset(&rblkcipher, 0, sizeof(rblkcipher));
@@ -706,12 +746,28 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
static int __maybe_unused crypto_skcipher_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_istat_cipher *istat;
struct crypto_stat_cipher rcipher;
istat = skcipher_get_stat(skcipher);
memset(&rcipher, 0, sizeof(rcipher));
strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
#endif
static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_alg_extsize,
@@ -720,7 +776,12 @@ static const struct crypto_type crypto_skcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_skcipher_show,
#endif
#ifdef CONFIG_CRYPTO_USER
.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_skcipher_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
@@ -775,6 +836,7 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -790,6 +852,9 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
return 0;
}

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c

@@ -25,14 +25,17 @@
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timex.h>
#include "internal.h"
#include "tcrypt.h"
/*

diff --git a/crypto/testmgr.c b/crypto/testmgr.c

@@ -860,12 +860,50 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/* Generate a random length in range [0, max_len], but prefer smaller values */
static unsigned int generate_random_length(unsigned int max_len)
{
unsigned int len = get_random_u32_below(max_len + 1);
/*
* The fuzz tests use prandom instead of the normal Linux RNG since they don't
* need cryptographically secure random numbers. This greatly improves the
* performance of these tests, especially if they are run before the Linux RNG
* has been initialized or if they are run on a lockdep-enabled kernel.
*/
switch (get_random_u32_below(4)) {
static inline void init_rnd_state(struct rnd_state *rng)
{
prandom_seed_state(rng, get_random_u64());
}
static inline u8 prandom_u8(struct rnd_state *rng)
{
return prandom_u32_state(rng);
}
static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
{
/*
* This is slightly biased for non-power-of-2 values of 'ceil', but this
* isn't important here.
*/
return prandom_u32_state(rng) % ceil;
}
static inline bool prandom_bool(struct rnd_state *rng)
{
return prandom_u32_below(rng, 2);
}
static inline u32 prandom_u32_inclusive(struct rnd_state *rng,
u32 floor, u32 ceil)
{
return floor + prandom_u32_below(rng, ceil - floor + 1);
}
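
One practical consequence of threading a struct rnd_state through these
generators is that a fuzz run becomes replayable from its seed. A sketch
of the idea; the seed-logging workflow is an assumption, not part of
this patch.

#include <linux/prandom.h>

static void example_replay_fuzz_run(u64 logged_seed)
{
        struct rnd_state rng;

        /* Re-seeding with a value captured from a failing run makes
         * every prandom_*() helper above replay the same sequence, so
         * the failing test vectors can be regenerated exactly. */
        prandom_seed_state(&rng, logged_seed);
}
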
/* Generate a random length in range [0, max_len], but prefer smaller values */
static unsigned int generate_random_length(struct rnd_state *rng,
unsigned int max_len)
{
unsigned int len = prandom_u32_below(rng, max_len + 1);
switch (prandom_u32_below(rng, 4)) {
case 0:
return len % 64;
case 1:
@@ -878,43 +916,44 @@ static unsigned int generate_random_length(unsigned int max_len)
}
/* Flip a random bit in the given nonempty data buffer */
static void flip_random_bit(u8 *buf, size_t size)
static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t bitpos;
bitpos = get_random_u32_below(size * 8);
bitpos = prandom_u32_below(rng, size * 8);
buf[bitpos / 8] ^= 1 << (bitpos % 8);
}
/* Flip a random byte in the given nonempty data buffer */
static void flip_random_byte(u8 *buf, size_t size)
static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
{
buf[get_random_u32_below(size)] ^= 0xff;
buf[prandom_u32_below(rng, size)] ^= 0xff;
}
/* Sometimes make some random changes to the given nonempty data buffer */
static void mutate_buffer(u8 *buf, size_t size)
static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t num_flips;
size_t i;
/* Sometimes flip some bits */
if (get_random_u32_below(4) == 0) {
num_flips = min_t(size_t, 1 << get_random_u32_below(8), size * 8);
if (prandom_u32_below(rng, 4) == 0) {
num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
size * 8);
for (i = 0; i < num_flips; i++)
flip_random_bit(buf, size);
flip_random_bit(rng, buf, size);
}
/* Sometimes flip some bytes */
if (get_random_u32_below(4) == 0) {
num_flips = min_t(size_t, 1 << get_random_u32_below(8), size);
if (prandom_u32_below(rng, 4) == 0) {
num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
for (i = 0; i < num_flips; i++)
flip_random_byte(buf, size);
flip_random_byte(rng, buf, size);
}
}
/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
static void generate_random_bytes(u8 *buf, size_t count)
static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
{
u8 b;
u8 increment;
@@ -923,11 +962,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
if (count == 0)
return;
switch (get_random_u32_below(8)) { /* Choose a generation strategy */
switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
case 0:
case 1:
/* All the same byte, plus optional mutations */
switch (get_random_u32_below(4)) {
switch (prandom_u32_below(rng, 4)) {
case 0:
b = 0x00;
break;
@@ -935,28 +974,28 @@ static void generate_random_bytes(u8 *buf, size_t count)
b = 0xff;
break;
default:
b = get_random_u8();
b = prandom_u8(rng);
break;
}
memset(buf, b, count);
mutate_buffer(buf, count);
mutate_buffer(rng, buf, count);
break;
case 2:
/* Ascending or descending bytes, plus optional mutations */
increment = get_random_u8();
b = get_random_u8();
increment = prandom_u8(rng);
b = prandom_u8(rng);
for (i = 0; i < count; i++, b += increment)
buf[i] = b;
mutate_buffer(buf, count);
mutate_buffer(rng, buf, count);
break;
default:
/* Fully random bytes */
for (i = 0; i < count; i++)
buf[i] = get_random_u8();
prandom_bytes_state(rng, buf, count);
}
}
static char *generate_random_sgl_divisions(struct test_sg_division *divs,
static char *generate_random_sgl_divisions(struct rnd_state *rng,
struct test_sg_division *divs,
size_t max_divs, char *p, char *end,
bool gen_flushes, u32 req_flags)
{
@@ -967,24 +1006,26 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
unsigned int this_len;
const char *flushtype_str;
if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
if (div == &divs[max_divs - 1] || prandom_bool(rng))
this_len = remaining;
else
this_len = get_random_u32_inclusive(1, remaining);
this_len = prandom_u32_inclusive(rng, 1, remaining);
div->proportion_of_total = this_len;
if (get_random_u32_below(4) == 0)
div->offset = get_random_u32_inclusive(PAGE_SIZE - 128, PAGE_SIZE - 1);
else if (get_random_u32_below(2) == 0)
div->offset = get_random_u32_below(32);
if (prandom_u32_below(rng, 4) == 0)
div->offset = prandom_u32_inclusive(rng,
PAGE_SIZE - 128,
PAGE_SIZE - 1);
else if (prandom_bool(rng))
div->offset = prandom_u32_below(rng, 32);
else
div->offset = get_random_u32_below(PAGE_SIZE);
if (get_random_u32_below(8) == 0)
div->offset = prandom_u32_below(rng, PAGE_SIZE);
if (prandom_u32_below(rng, 8) == 0)
div->offset_relative_to_alignmask = true;
div->flush_type = FLUSH_TYPE_NONE;
if (gen_flushes) {
switch (get_random_u32_below(4)) {
switch (prandom_u32_below(rng, 4)) {
case 0:
div->flush_type = FLUSH_TYPE_REIMPORT;
break;
@@ -996,7 +1037,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
if (div->flush_type != FLUSH_TYPE_NONE &&
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
get_random_u32_below(2) == 0)
prandom_bool(rng))
div->nosimd = true;
switch (div->flush_type) {
@@ -1031,7 +1072,8 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
}
/* Generate a random testvec_config for fuzz testing */
static void generate_random_testvec_config(struct testvec_config *cfg,
static void generate_random_testvec_config(struct rnd_state *rng,
struct testvec_config *cfg,
char *name, size_t max_namelen)
{
char *p = name;
@@ -1043,7 +1085,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "random:");
switch (get_random_u32_below(4)) {
switch (prandom_u32_below(rng, 4)) {
case 0:
case 1:
cfg->inplace_mode = OUT_OF_PLACE;
@@ -1058,12 +1100,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
if (get_random_u32_below(2) == 0) {
if (prandom_bool(rng)) {
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
p += scnprintf(p, end - p, " may_sleep");
}
switch (get_random_u32_below(4)) {
switch (prandom_u32_below(rng, 4)) {
case 0:
cfg->finalization_type = FINALIZATION_TYPE_FINAL;
p += scnprintf(p, end - p, " use_final");
@@ -1078,36 +1120,37 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
get_random_u32_below(2) == 0) {
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) {
cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd");
}
p += scnprintf(p, end - p, " src_divs=[");
p = generate_random_sgl_divisions(cfg->src_divs,
p = generate_random_sgl_divisions(rng, cfg->src_divs,
ARRAY_SIZE(cfg->src_divs), p, end,
(cfg->finalization_type !=
FINALIZATION_TYPE_DIGEST),
cfg->req_flags);
p += scnprintf(p, end - p, "]");
if (cfg->inplace_mode == OUT_OF_PLACE && get_random_u32_below(2) == 0) {
if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) {
p += scnprintf(p, end - p, " dst_divs=[");
p = generate_random_sgl_divisions(cfg->dst_divs,
p = generate_random_sgl_divisions(rng, cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
p, end, false,
cfg->req_flags);
p += scnprintf(p, end - p, "]");
}
if (get_random_u32_below(2) == 0) {
cfg->iv_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
if (prandom_bool(rng)) {
cfg->iv_offset = prandom_u32_inclusive(rng, 1,
MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
}
if (get_random_u32_below(2) == 0) {
cfg->key_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
if (prandom_bool(rng)) {
cfg->key_offset = prandom_u32_inclusive(rng, 1,
MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
}
@@ -1620,11 +1663,14 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
init_rnd_state(&rng);
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(vec, vec_name, &cfg,
req, desc, tsgl, hashstate);
@@ -1642,15 +1688,16 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
* Generate a hash test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
static void generate_random_hash_testvec(struct shash_desc *desc,
static void generate_random_hash_testvec(struct rnd_state *rng,
struct shash_desc *desc,
struct hash_testvec *vec,
unsigned int maxkeysize,
unsigned int maxdatasize,
char *name, size_t max_namelen)
{
/* Data */
vec->psize = generate_random_length(maxdatasize);
generate_random_bytes((u8 *)vec->plaintext, vec->psize);
vec->psize = generate_random_length(rng, maxdatasize);
generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
/*
* Key: length in range [1, maxkeysize], but usually choose maxkeysize.
@@ -1660,9 +1707,9 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
vec->ksize = 0;
if (maxkeysize) {
vec->ksize = maxkeysize;
if (get_random_u32_below(4) == 0)
vec->ksize = get_random_u32_inclusive(1, maxkeysize);
generate_random_bytes((u8 *)vec->key, vec->ksize);
if (prandom_u32_below(rng, 4) == 0)
vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
vec->ksize);
@@ -1696,6 +1743,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
const char *driver = crypto_ahash_driver_name(tfm);
struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_shash *generic_tfm = NULL;
struct shash_desc *generic_desc = NULL;
@@ -1709,6 +1757,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
if (noextratests)
return 0;
init_rnd_state(&rng);
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
@@ -1777,10 +1827,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
generate_random_hash_testvec(generic_desc, &vec,
generate_random_hash_testvec(&rng, generic_desc, &vec,
maxkeysize, maxdatasize,
vec_name, sizeof(vec_name));
generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
generate_random_testvec_config(&rng, cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(&vec, vec_name, cfg,
req, desc, tsgl, hashstate);
@@ -2182,11 +2233,14 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
init_rnd_state(&rng);
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_aead_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
@@ -2202,6 +2256,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
struct aead_extra_tests_ctx {
struct rnd_state rng;
struct aead_request *req;
struct crypto_aead *tfm;
const struct alg_test_desc *test_desc;
@@ -2220,24 +2275,26 @@ struct aead_extra_tests_ctx {
* here means the full ciphertext including the authentication tag. The
* authentication tag (and hence also the ciphertext) is assumed to be nonempty.
*/
static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
static void mutate_aead_message(struct rnd_state *rng,
struct aead_testvec *vec, bool aad_iv,
unsigned int ivsize)
{
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen;
if (get_random_u32_below(2) == 0 && vec->alen > aad_tail_size) {
if (prandom_bool(rng) && vec->alen > aad_tail_size) {
/* Mutate the AAD */
flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
if (get_random_u32_below(2) == 0)
flip_random_bit(rng, (u8 *)vec->assoc,
vec->alen - aad_tail_size);
if (prandom_bool(rng))
return;
}
if (get_random_u32_below(2) == 0) {
if (prandom_bool(rng)) {
/* Mutate auth tag (assuming it's at the end of ciphertext) */
flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize);
} else {
/* Mutate any part of the ciphertext */
flip_random_bit((u8 *)vec->ctext, vec->clen);
flip_random_bit(rng, (u8 *)vec->ctext, vec->clen);
}
}
@@ -2248,7 +2305,8 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
*/
#define MIN_COLLISION_FREE_AUTHSIZE 8
static void generate_aead_message(struct aead_request *req,
static void generate_aead_message(struct rnd_state *rng,
struct aead_request *req,
const struct aead_test_suite *suite,
struct aead_testvec *vec,
bool prefer_inauthentic)
@@ -2257,17 +2315,18 @@ static void generate_aead_message(struct aead_request *req,
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
(prefer_inauthentic || get_random_u32_below(4) == 0);
(prefer_inauthentic ||
prandom_u32_below(rng, 4) == 0);
/* Generate the AAD. */
generate_random_bytes((u8 *)vec->assoc, vec->alen);
generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen);
if (suite->aad_iv && vec->alen >= ivsize)
/* Avoid implementation-defined behavior. */
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
if (inauthentic && get_random_u32_below(2) == 0) {
if (inauthentic && prandom_bool(rng)) {
/* Generate a random ciphertext. */
generate_random_bytes((u8 *)vec->ctext, vec->clen);
generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen);
} else {
int i = 0;
struct scatterlist src[2], dst;
@@ -2279,7 +2338,7 @@ static void generate_aead_message(struct aead_request *req,
if (vec->alen)
sg_set_buf(&src[i++], vec->assoc, vec->alen);
if (vec->plen) {
generate_random_bytes((u8 *)vec->ptext, vec->plen);
generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen);
sg_set_buf(&src[i++], vec->ptext, vec->plen);
}
sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
@@ -2299,7 +2358,7 @@ static void generate_aead_message(struct aead_request *req,
* Mutate the authentic (ciphertext, AAD) pair to get an
* inauthentic one.
*/
mutate_aead_message(vec, suite->aad_iv, ivsize);
mutate_aead_message(rng, vec, suite->aad_iv, ivsize);
}
vec->novrfy = 1;
if (suite->einval_allowed)
@@ -2313,7 +2372,8 @@ static void generate_aead_message(struct aead_request *req,
* If 'prefer_inauthentic' is true, then this function will generate inauthentic
* test vectors (i.e. vectors with 'vec->novrfy=1') more often.
*/
static void generate_random_aead_testvec(struct aead_request *req,
static void generate_random_aead_testvec(struct rnd_state *rng,
struct aead_request *req,
struct aead_testvec *vec,
const struct aead_test_suite *suite,
unsigned int maxkeysize,
@@ -2329,18 +2389,18 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
if (get_random_u32_below(4) == 0)
vec->klen = get_random_u32_below(maxkeysize + 1);
generate_random_bytes((u8 *)vec->key, vec->klen);
if (prandom_u32_below(rng, 4) == 0)
vec->klen = prandom_u32_below(rng, maxkeysize + 1);
generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
/* IV */
generate_random_bytes((u8 *)vec->iv, ivsize);
generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
authsize = maxauthsize;
if (get_random_u32_below(4) == 0)
authsize = get_random_u32_below(maxauthsize + 1);
if (prandom_u32_below(rng, 4) == 0)
authsize = prandom_u32_below(rng, maxauthsize + 1);
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize))
@@ -2349,11 +2409,11 @@ static void generate_random_aead_testvec(struct aead_request *req,
vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
/* AAD, plaintext, and ciphertext lengths */
total_len = generate_random_length(maxdatasize);
if (get_random_u32_below(4) == 0)
total_len = generate_random_length(rng, maxdatasize);
if (prandom_u32_below(rng, 4) == 0)
vec->alen = 0;
else
vec->alen = generate_random_length(total_len);
vec->alen = generate_random_length(rng, total_len);
vec->plen = total_len - vec->alen;
vec->clen = vec->plen + authsize;
@@ -2364,7 +2424,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
vec->novrfy = 0;
vec->crypt_error = 0;
if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
generate_aead_message(req, suite, vec, prefer_inauthentic);
generate_aead_message(rng, req, suite, vec, prefer_inauthentic);
snprintf(name, max_namelen,
"\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
@@ -2376,7 +2436,7 @@ static void try_to_generate_inauthentic_testvec(
int i;
for (i = 0; i < 10; i++) {
generate_random_aead_testvec(ctx->req, &ctx->vec,
generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
@@ -2407,7 +2467,8 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
*/
try_to_generate_inauthentic_testvec(ctx);
if (ctx->vec.novrfy) {
generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
generate_random_testvec_config(&ctx->rng, &ctx->cfg,
ctx->cfgname,
sizeof(ctx->cfgname));
err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
@@ -2497,12 +2558,13 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
* the other implementation against them.
*/
for (i = 0; i < fuzz_iterations * 8; i++) {
generate_random_aead_testvec(generic_req, &ctx->vec,
generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
sizeof(ctx->vec_name), false);
generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
generate_random_testvec_config(&ctx->rng, &ctx->cfg,
ctx->cfgname,
sizeof(ctx->cfgname));
if (!ctx->vec.novrfy) {
err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
@@ -2541,6 +2603,7 @@ static int test_aead_extra(const struct alg_test_desc *test_desc,
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
init_rnd_state(&ctx->rng);
ctx->req = req;
ctx->tfm = crypto_aead_reqtfm(req);
ctx->test_desc = test_desc;
@@ -2930,11 +2993,14 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
init_rnd_state(&rng);
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_skcipher_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
@@ -2952,7 +3018,8 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
* Generate a symmetric cipher test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
static void generate_random_cipher_testvec(struct skcipher_request *req,
static void generate_random_cipher_testvec(struct rnd_state *rng,
struct skcipher_request *req,
struct cipher_testvec *vec,
unsigned int maxdatasize,
char *name, size_t max_namelen)
@@ -2966,17 +3033,17 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
if (get_random_u32_below(4) == 0)
vec->klen = get_random_u32_below(maxkeysize + 1);
generate_random_bytes((u8 *)vec->key, vec->klen);
if (prandom_u32_below(rng, 4) == 0)
vec->klen = prandom_u32_below(rng, maxkeysize + 1);
generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
/* IV */
generate_random_bytes((u8 *)vec->iv, ivsize);
generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Plaintext */
vec->len = generate_random_length(maxdatasize);
generate_random_bytes((u8 *)vec->ptext, vec->len);
vec->len = generate_random_length(rng, maxdatasize);
generate_random_bytes(rng, (u8 *)vec->ptext, vec->len);
/* If the key couldn't be set, no need to continue to encrypt. */
if (vec->setkey_error)
@@ -3018,6 +3085,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
const char *driver = crypto_skcipher_driver_name(tfm);
struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_skcipher *generic_tfm = NULL;
struct skcipher_request *generic_req = NULL;
@@ -3035,6 +3103,8 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
if (strncmp(algname, "kw(", 3) == 0)
return 0;
init_rnd_state(&rng);
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
@@ -3119,9 +3189,11 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
generate_random_cipher_testvec(&rng, generic_req, &vec,
maxdatasize,
vec_name, sizeof(vec_name));
generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
generate_random_testvec_config(&rng, cfg, cfgname,
sizeof(cfgname));
err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
cfg, req, tsgls);
@@ -4572,6 +4644,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(aes_cmac128_tv_template)
}
}, {
.alg = "cmac(camellia)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(camellia_cmac128_tv_template)
}
}, {
.alg = "cmac(des3_ede)",
.test = alg_test_hash,

diff --git a/crypto/testmgr.h b/crypto/testmgr.h

@@ -25665,6 +25665,53 @@ static const struct cipher_testvec fcrypt_pcbc_tv_template[] = {
/*
* CAMELLIA test vectors.
*/
static const struct hash_testvec camellia_cmac128_tv_template[] = {
{ /* From draft-kato-ipsec-camellia-cmac96and128-01 */
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.plaintext = zeroed_string,
.digest = "\xba\x92\x57\x82\xaa\xa1\xf5\xd9"
"\xa0\x0f\x89\x64\x80\x94\xfc\x71",
.psize = 0,
.ksize = 16,
}, {
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
.digest = "\x6d\x96\x28\x54\xa3\xb9\xfd\xa5"
"\x6d\x7d\x45\xa9\x5e\xe1\x79\x93",
.psize = 16,
.ksize = 16,
}, {
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11",
.digest = "\x5c\x18\xd1\x19\xcc\xd6\x76\x61"
"\x44\xac\x18\x66\x13\x1d\x9f\x22",
.psize = 40,
.ksize = 16,
}, {
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.digest = "\xc2\x69\x9a\x6e\xba\x55\xce\x9d"
"\x93\x9a\x8a\x4e\x19\x46\x6e\xe9",
.psize = 64,
.ksize = 16,
}
};
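
These vectors are exercised through the generic hash test path; computing
the same MAC directly would look roughly like the sketch below. The
helper name is hypothetical and error handling is condensed.

#include <crypto/hash.h>

static int example_cmac_camellia(const u8 *key, unsigned int klen,
                                 const u8 *msg, unsigned int len,
                                 u8 *mac)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("cmac(camellia)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_shash_setkey(tfm, key, klen) ?:
              crypto_shash_tfm_digest(tfm, msg, len, mac);

        crypto_free_shash(tfm);
        return err;
}
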
static const struct cipher_testvec camellia_tv_template[] = {
{
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"

diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c

@@ -18,9 +18,7 @@
struct meson_rng_data {
void __iomem *base;
struct platform_device *pdev;
struct hwrng rng;
struct clk *core_clk;
};
static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -33,47 +31,28 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
return sizeof(u32);
}
static void meson_rng_clk_disable(void *data)
{
clk_disable_unprepare(data);
}
static int meson_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_rng_data *data;
int ret;
struct clk *core_clk;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->pdev = pdev;
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
data->core_clk = devm_clk_get_optional(dev, "core");
if (IS_ERR(data->core_clk))
return dev_err_probe(dev, PTR_ERR(data->core_clk),
core_clk = devm_clk_get_optional_enabled(dev, "core");
if (IS_ERR(core_clk))
return dev_err_probe(dev, PTR_ERR(core_clk),
"Failed to get core clock\n");
if (data->core_clk) {
ret = clk_prepare_enable(data->core_clk);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, meson_rng_clk_disable,
data->core_clk);
if (ret)
return ret;
}
data->rng.name = pdev->name;
data->rng.read = meson_rng_read;
platform_set_drvdata(pdev, data);
return devm_hwrng_register(dev, &data->rng);
}
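
The probe now leans on devm_clk_get_optional_enabled(), which collapses
the old get / clk_prepare_enable / add-disable-action sequence into one
managed call. The general pattern, as a sketch with hypothetical names:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct clk *clk;

        /* Returns NULL (not an error) when the optional clock is
         * absent; otherwise the clock is enabled here and disabled
         * again automatically when the device is unbound. */
        clk = devm_clk_get_optional_enabled(&pdev->dev, "core");
        if (IS_ERR(clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "Failed to get core clock\n");

        return 0;
}
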

diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c

@@ -84,7 +84,6 @@ struct xgene_rng_dev {
unsigned long failure_ts;/* First failure timestamp */
struct timer_list failure_timer;
struct device *dev;
struct clk *clk;
};
static void xgene_rng_expired_timer(struct timer_list *t)
@@ -200,7 +199,7 @@ static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
{
struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
struct xgene_rng_dev *ctx = id;
/* RNG Alarm Counter overflow */
xgene_rng_chk_overflow(ctx);
@@ -314,6 +313,7 @@ static struct hwrng xgene_rng_func = {
static int xgene_rng_probe(struct platform_device *pdev)
{
struct xgene_rng_dev *ctx;
struct clk *clk;
int rc = 0;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -337,58 +337,36 @@ static int xgene_rng_probe(struct platform_device *pdev)
rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
dev_name(&pdev->dev), ctx);
if (rc) {
dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
return rc;
}
if (rc)
return dev_err_probe(&pdev->dev, rc, "Could not request RNG alarm IRQ\n");
/* Enable IP clock */
ctx->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ctx->clk)) {
dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
} else {
rc = clk_prepare_enable(ctx->clk);
if (rc) {
dev_warn(&pdev->dev,
"clock prepare enable failed for RNG");
return rc;
}
}
clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(&pdev->dev, PTR_ERR(clk), "Couldn't get the clock for RNG\n");
xgene_rng_func.priv = (unsigned long) ctx;
rc = devm_hwrng_register(&pdev->dev, &xgene_rng_func);
if (rc) {
dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
if (!IS_ERR(ctx->clk))
clk_disable_unprepare(ctx->clk);
return rc;
}
if (rc)
return dev_err_probe(&pdev->dev, rc, "RNG registering failed\n");
rc = device_init_wakeup(&pdev->dev, 1);
if (rc) {
dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
rc);
if (!IS_ERR(ctx->clk))
clk_disable_unprepare(ctx->clk);
return rc;
}
if (rc)
return dev_err_probe(&pdev->dev, rc, "RNG device_init_wakeup failed\n");
return 0;
}
static int xgene_rng_remove(struct platform_device *pdev)
{
struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
int rc;
rc = device_init_wakeup(&pdev->dev, 0);
if (rc)
dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
if (!IS_ERR(ctx->clk))
clk_disable_unprepare(ctx->clk);
return rc;
return 0;
}
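
dev_err_probe() does double duty throughout this probe cleanup: it logs
the message, records the reason quietly for -EPROBE_DEFER (visible in
the devices_deferred debugfs file), and returns the error in one
expression. A condensed sketch of the shape it replaces; the function
and message are illustrative only.

#include <linux/device.h>

static int example_request(struct device *dev, int rc)
{
        /* Equivalent to dev_err(dev, "...: %d\n", rc); return rc;
         * except that probe deferral is not printed as an error. */
        if (rc)
                return dev_err_probe(dev, rc,
                                     "Could not request RNG alarm IRQ\n");
        return 0;
}
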
static const struct of_device_id xgene_rng_of_match[] = {

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig

@@ -240,21 +240,6 @@ config CRYPTO_DEV_TALITOS2
Say 'Y' here to use the Freescale Security Engine (SEC)
version 2 and following as found on MPC83xx, MPC85xx, etc ...
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
select CRYPTO_AES
select CRYPTO_DES
select CRYPTO_ECB
select CRYPTO_CBC
select CRYPTO_CTR
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
help
Driver for the IXP4xx NPE crypto engine.
config CRYPTO_DEV_PPC4XX
tristate "Driver AMCC PPC4xx crypto accelerator"
depends on PPC && 4xx
@@ -502,10 +487,10 @@ config CRYPTO_DEV_MXS_DCP
To compile this driver as a module, choose M here: the module
will be called mxs-dcp.
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
source "drivers/crypto/intel/Kconfig"
config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
@@ -774,7 +759,7 @@ config CRYPTO_DEV_ARTPEC6
config CRYPTO_DEV_CCREE
tristate "Support for ARM TrustZone CryptoCell family of security processors"
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
default n
depends on HAS_IOMEM
select CRYPTO_HASH
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
@@ -810,6 +795,7 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_DES
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -820,7 +806,6 @@ config CRYPTO_DEV_SA2UL
used for crypto offload. Select this if you want to use hardware
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/keembay/Kconfig"
source "drivers/crypto/aspeed/Kconfig"
endif # CRYPTO_HW

diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile

@@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
@@ -33,7 +32,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
@@ -51,4 +49,4 @@ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += keembay/
obj-y += intel/

diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c

@@ -1101,7 +1101,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
u32 clr_val)
{
struct device *dev = (struct device *)data;
struct device *dev = data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);

diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c

@@ -289,7 +289,7 @@ static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
if (mode == ASPEED_RSA_EXP_MODE)
idx = acry_dev->exp_dw_mapping[j - 1];
else if (mode == ASPEED_RSA_MOD_MODE)
else /* mode == ASPEED_RSA_MOD_MODE */
idx = acry_dev->mod_dw_mapping[j - 1];
dw_buf[idx] = cpu_to_le32(data);
@@ -712,7 +712,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
{
struct aspeed_acry_dev *acry_dev;
struct device *dev = &pdev->dev;
struct resource *res;
int rc;
acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
@@ -724,13 +723,11 @@ static int aspeed_acry_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, acry_dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
acry_dev->regs = devm_ioremap_resource(dev, res);
acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(acry_dev->regs))
return PTR_ERR(acry_dev->regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
acry_dev->acry_sram = devm_ioremap_resource(dev, res);
acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(acry_dev->acry_sram))
return PTR_ERR(acry_dev->acry_sram);
@@ -782,7 +779,10 @@ static int aspeed_acry_probe(struct platform_device *pdev)
acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
&acry_dev->buf_dma_addr,
GFP_KERNEL);
memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
if (!acry_dev->buf_addr) {
rc = -ENOMEM;
goto err_engine_rsa_start;
}
aspeed_acry_register(acry_dev);

diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c

@@ -493,17 +493,11 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
if (req->cryptlen < ivsize)
return;
if (rctx->mode & AES_FLAGS_ENCRYPT) {
if (rctx->mode & AES_FLAGS_ENCRYPT)
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - ivsize, ivsize, 0);
} else {
if (req->src == req->dst)
memcpy(req->iv, rctx->lastc, ivsize);
else
scatterwalk_map_and_copy(req->iv, req->src,
req->cryptlen - ivsize,
ivsize, 0);
}
else
memcpy(req->iv, rctx->lastc, ivsize);
}
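
The simplification works because rctx->lastc is now captured
unconditionally before a non-ECB decrypt (see atmel_aes_crypt() below),
so the out-of-place re-read of req->src is redundant. The underlying
invariant, as a sketch with a hypothetical helper: in CBC-style modes
the IV for the next request is the last ciphertext block of this one,
and it must be saved before an in-place decrypt overwrites it.

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: capture the chaining IV before decryption. */
static void save_next_iv(const u8 *ctext, unsigned int cryptlen,
                         u8 *iv, unsigned int ivsize)
{
        memcpy(iv, ctext + cryptlen - ivsize, ivsize);
}
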
static inline struct atmel_aes_ctr_ctx *
@@ -1146,7 +1140,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
if (opmode != AES_FLAGS_ECB &&
!(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
!(mode & AES_FLAGS_ENCRYPT)) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -1341,7 +1335,7 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.init = atmel_aes_init_tfm,

diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c

@@ -1948,14 +1948,32 @@ static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
struct scatterlist *sgbuf;
size_t hs = ctx->hash_size;
size_t i, num_words = hs / sizeof(u32);
bool use_dma = false;
u32 mr;
/* Special case for empty message. */
if (!req->nbytes)
return atmel_sha_complete(dd, -EINVAL); // TODO:
if (!req->nbytes) {
req->nbytes = 0;
ctx->bufcnt = 0;
ctx->digcnt[0] = 0;
ctx->digcnt[1] = 0;
switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
case SHA_FLAGS_SHA1:
case SHA_FLAGS_SHA224:
case SHA_FLAGS_SHA256:
atmel_sha_fill_padding(ctx, 64);
break;
case SHA_FLAGS_SHA384:
case SHA_FLAGS_SHA512:
atmel_sha_fill_padding(ctx, 128);
break;
}
sg_init_one(&dd->tmp, ctx->buffer, ctx->bufcnt);
}
/* Check DMA threshold and alignment. */
if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
@@ -1985,12 +2003,20 @@ static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
/* Special case for empty message. */
if (!req->nbytes) {
sgbuf = &dd->tmp;
req->nbytes = ctx->bufcnt;
} else {
sgbuf = req->src;
}
/* Process data. */
if (use_dma)
return atmel_sha_dma_start(dd, req->src, req->nbytes,
return atmel_sha_dma_start(dd, sgbuf, req->nbytes,
atmel_sha_hmac_final_done);
return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
return atmel_sha_cpu_start(dd, sgbuf, req->nbytes, false, true,
atmel_sha_hmac_final_done);
}

diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c

@@ -126,7 +126,7 @@ static void atmel_sha204a_remove(struct i2c_client *client)
kfree((void *)i2c_priv->hwrng.priv);
}
static const struct of_device_id atmel_sha204a_dt_ids[] = {
static const struct of_device_id atmel_sha204a_dt_ids[] __maybe_unused = {
{ .compatible = "atmel,atsha204", },
{ .compatible = "atmel,atsha204a", },
{ /* sentinel */ }

diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c

@@ -565,17 +565,12 @@
if (req->cryptlen < ivsize)
return;
if (rctx->mode & TDES_FLAGS_ENCRYPT) {
if (rctx->mode & TDES_FLAGS_ENCRYPT)
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - ivsize, ivsize, 0);
} else {
if (req->src == req->dst)
memcpy(req->iv, rctx->lastc, ivsize);
else
scatterwalk_map_and_copy(req->iv, req->src,
req->cryptlen - ivsize,
ivsize, 0);
}
else
memcpy(req->iv, rctx->lastc, ivsize);
}
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
@@ -722,7 +717,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
!(mode & TDES_FLAGS_ENCRYPT)) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c

@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* Copyright 2016-2019, 2023 NXP
*
* Based on talitos crypto API driver.
*
@@ -3542,13 +3542,14 @@ int caam_algapi_init(struct device *ctrldev)
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
if (priv->era < 10) {
struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
u32 cha_vid, cha_inst, aes_rn;
cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
cha_vid = rd_reg32(&perfmon->cha_id_ls);
aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
cha_inst = rd_reg32(&perfmon->cha_num_ls);
des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
@@ -3556,23 +3557,23 @@ int caam_algapi_init(struct device *ctrldev)
ccha_inst = 0;
ptha_inst = 0;
aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
CHA_ID_LS_AES_MASK;
aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
} else {
struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
u32 aesa, mdha;
aesa = rd_reg32(&priv->ctrl->vreg.aesa);
mdha = rd_reg32(&priv->ctrl->vreg.mdha);
aesa = rd_reg32(&vreg->aesa);
mdha = rd_reg32(&vreg->mdha);
aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
aes_inst = aesa & CHA_VER_NUM_MASK;
md_inst = mdha & CHA_VER_NUM_MASK;
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;
gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}


@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
* Copyright 2018-2019 NXP
* Copyright 2018-2019, 2023 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -1956,12 +1956,14 @@ int caam_algapi_hash_init(struct device *ctrldev)
* presence and attributes of MD block.
*/
if (priv->era < 10) {
md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
md_vid = (rd_reg32(&perfmon->cha_id_ls) &
CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
md_inst = (rd_reg32(&perfmon->cha_num_ls) &
CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
} else {
u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);
md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
md_inst = mdha & CHA_VER_NUM_MASK;


@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
* Copyright 2018-2019 NXP
* Copyright 2018-2019, 2023 NXP
*
* There is no Shared Descriptor for PKC, so the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -1168,10 +1168,10 @@ int caam_pkc_init(struct device *ctrldev)
/* Determine public key hardware accelerator presence. */
if (priv->era < 10) {
pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
} else {
pkha = rd_reg32(&priv->ctrl->vreg.pkha);
pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
pk_inst = pkha & CHA_VER_NUM_MASK;
/*


@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
* Copyright 2018-2019 NXP
* Copyright 2018-2019, 2023 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -227,10 +227,10 @@ int caam_rng_init(struct device *ctrldev)
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
else
rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;
if (!rng_inst)
return 0;


@@ -3,7 +3,7 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
* Copyright 2018-2019 NXP
* Copyright 2018-2019, 2023 NXP
*/
#include <linux/device.h>
@@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
const u32 rdsta_if = RDSTA_IF0 << sh_idx;
const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
const u32 rdsta_mask = rdsta_if | rdsta_pr;
/* Clear the contents before using the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
@@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
}
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
}
kfree(desc);
@@ -395,7 +397,7 @@ start_rng:
RTMCTL_SAMP_MODE_RAW_ES_SC);
}
static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
static int caam_get_era_from_hw(struct caam_perfmon __iomem *perfmon)
{
static const struct {
u16 ip_id;
@@ -421,12 +423,12 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
u16 ip_id;
int i;
ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
ccbvid = rd_reg32(&perfmon->ccb_id);
era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
if (era) /* This is '0' prior to CAAM ERA-6 */
return era;
id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
id_ms = rd_reg32(&perfmon->caam_id_ms);
ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
@@ -444,9 +446,9 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
* In case this property is not passed an attempt to retrieve the CAAM
* era via register reads will be made.
*
* @ctrl: controller region
* @perfmon: Performance Monitor Registers
*/
static int caam_get_era(struct caam_ctrl __iomem *ctrl)
static int caam_get_era(struct caam_perfmon __iomem *perfmon)
{
struct device_node *caam_node;
int ret;
@@ -459,7 +461,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
if (!ret)
return prop;
else
return caam_get_era_from_hw(ctrl);
return caam_get_era_from_hw(perfmon);
}
/*
@@ -626,12 +628,14 @@ static int caam_probe(struct platform_device *pdev)
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
struct caam_perfmon __iomem *perfmon;
struct dentry *dfs_root;
u32 scfgr, comp_params;
u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
bool pr_support = false;
bool reg_access = true;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
@@ -645,6 +649,17 @@ static int caam_probe(struct platform_device *pdev)
caam_imx = (bool)imx_soc_match;
if (imx_soc_match) {
/*
* Until Layerscape and i.MX OP-TEE get in sync,
* only i.MX OP-TEE use cases disallow access to
* caam page 0 (controller) registers.
*/
np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
ctrlpriv->optee_en = !!np;
of_node_put(np);
reg_access = !ctrlpriv->optee_en;
if (!imx_soc_match->data) {
dev_err(dev, "No clock data provided for i.MX SoC");
return -EINVAL;
@@ -665,10 +680,38 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
ring = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
u32 reg;
if (of_property_read_u32_index(np, "reg", 0, &reg)) {
dev_err(dev, "%s read reg property error\n",
np->full_name);
continue;
}
ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
((__force uint8_t *)ctrl + reg);
ctrlpriv->total_jobrs++;
ring++;
}
/*
* Wherever possible, instead of accessing registers from the global page,
* use the alias registers in the first (cf. DT nodes order)
* job ring's page.
*/
perfmon = ring ? (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
(struct caam_perfmon __iomem *)&ctrl->perfmon;
caam_little_end = !(bool)(rd_reg32(&perfmon->status) &
(CSTA_PLEND | CSTA_ALT_PLEND));
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
comp_params = rd_reg32(&perfmon->comp_parms_ms);
if (reg_access && comp_params & CTPR_MS_PS &&
rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
caam_ptr_sz = sizeof(u64);
else
caam_ptr_sz = sizeof(u32);
@@ -733,6 +776,9 @@ static int caam_probe(struct platform_device *pdev)
}
#endif
if (!reg_access)
goto set_dma_mask;
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register.
@@ -772,13 +818,14 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
set_dma_mask:
ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
return ret;
}
ctrlpriv->era = caam_get_era(ctrl);
ctrlpriv->era = caam_get_era(perfmon);
ctrlpriv->domain = iommu_get_domain_for_dev(dev);
dfs_root = debugfs_create_dir(dev_name(dev), NULL);
@@ -789,7 +836,7 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
caam_debugfs_init(ctrlpriv, dfs_root);
caam_debugfs_init(ctrlpriv, perfmon, dfs_root);
/* Check to see if (DPAA 1.x) QI present. If so, enable */
if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -808,26 +855,16 @@ static int caam_probe(struct platform_device *pdev)
#endif
}
ring = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
((__force uint8_t *)ctrl +
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
ctrlpriv->total_jobrs++;
ring++;
}
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
return -ENOMEM;
}
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ls);
if (!reg_access)
goto report_live;
comp_params = rd_reg32(&perfmon->comp_parms_ls);
ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);
/*
@@ -836,15 +873,21 @@ static int caam_probe(struct platform_device *pdev)
* check both here.
*/
if (ctrlpriv->era < 10) {
rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
(rd_reg32(&ctrl->perfmon.cha_num_ls) & CHA_ID_LS_AES_MASK);
(rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK);
} else {
rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
struct version_regs __iomem *vreg;
vreg = ctrlpriv->total_jobrs ?
(struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
(struct version_regs __iomem *)&ctrl->vreg;
rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
CHA_VER_VID_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
(rd_reg32(&ctrl->vreg.aesa) & CHA_VER_MISC_AES_NUM_MASK);
(rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK);
}
/*
@@ -923,10 +966,11 @@ static int caam_probe(struct platform_device *pdev)
clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
}
report_live:
/* NOTE: RTIC detection ought to go here, around Si time */
caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
(u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
(u64)rd_reg32(&perfmon->caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
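The common thread through the caam hunks above: when i.MX OP-TEE owns the controller (page 0) registers, the driver reads hardware identification through the alias copies mapped into the first job ring's page instead. A condensed sketch of the selection, using the names from the probe hunk:

	/* Use job ring 0's register aliases when a ring exists; fall back
	 * to the controller page only when none was found in the DT. */
	perfmon = ring ? (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
			 (struct caam_perfmon __iomem *)&ctrl->perfmon;

	caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
		  (u64)rd_reg32(&perfmon->caam_id_ls);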


@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
/* Copyright 2019, 2023 NXP */
#include <linux/debugfs.h>
#include "compat.h"
@@ -42,16 +42,15 @@ void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv)
}
#endif
void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
struct caam_perfmon __force *perfmon,
struct dentry *root)
{
struct caam_perfmon *perfmon;
/*
* FIXME: needs better naming distinction, as some amalgamation of
* "caam" and nprop->full_name. The OF name isn't distinctive,
* but does separate instances
*/
perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon;
ctrlpriv->ctl = debugfs_create_dir("ctl", root);
@@ -78,6 +77,9 @@ void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
debugfs_create_file("fault_status", 0444, ctrlpriv->ctl,
&perfmon->status, &caam_fops_u32_ro);
if (ctrlpriv->optee_en)
return;
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);


@@ -1,16 +1,19 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2019 NXP */
/* Copyright 2019, 2023 NXP */
#ifndef CAAM_DEBUGFS_H
#define CAAM_DEBUGFS_H
struct dentry;
struct caam_drv_private;
struct caam_perfmon;
#ifdef CONFIG_DEBUG_FS
void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root);
void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
struct caam_perfmon __force *perfmon, struct dentry *root);
#else
static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
struct caam_perfmon __force *perfmon,
struct dentry *root)
{}
#endif


@@ -8,7 +8,7 @@
static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
{
struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private;
struct dpaa2_caam_priv *priv = file->private;
u32 fqid, fcnt, bcnt;
int i, err;


@@ -94,6 +94,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
u8 optee_en; /* Nonzero if OP-TEE f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
int era; /* CAAM Era (internal HW revision) */


@@ -4,7 +4,7 @@
* JobR backend functionality
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
* Copyright 2019 NXP
* Copyright 2019, 2023 NXP
*/
#include <linux/of_irq.h>
@@ -72,19 +72,27 @@ static void caam_jr_crypto_engine_exit(void *data)
crypto_engine_exit(jrpriv->engine);
}
static int caam_reset_hw_jr(struct device *dev)
/*
* Put the CAAM in quiesce, i.e. stop job processing
*
* Must be called with itr disabled
*/
static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
unsigned int timeout = 100000;
/*
* mask interrupts since we are going to poll
* for reset completion status
*/
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
/* Check the current status */
if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
goto wait_quiesce_completion;
/* initiate flush (required prior to reset) */
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
/* Reset the field */
clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);
/* initiate flush / park (required prior to reset) */
wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);
wait_quiesce_completion:
while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
JRINT_ERR_HALT_INPROGRESS) && --timeout)
cpu_relax();
@@ -95,8 +103,35 @@ static int caam_reset_hw_jr(struct device *dev)
return -EIO;
}
return 0;
}
/*
* Flush the job ring, so that running jobs are stopped, queued jobs are
* invalidated and the CAAM no longer fetches from the input ring.
*
* Must be called with itr disabled
*/
static int caam_jr_flush(struct device *dev)
{
return caam_jr_stop_processing(dev, JRCR_RESET);
}
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
unsigned int timeout = 100000;
int err;
/*
* mask interrupts since we are going to poll
* for reset completion status
*/
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
err = caam_jr_flush(dev);
if (err)
return err;
/* initiate reset */
timeout = 100000;
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
cpu_relax();
@@ -163,6 +198,11 @@ static int caam_jr_remove(struct platform_device *pdev)
return ret;
}
static void caam_jr_platform_shutdown(struct platform_device *pdev)
{
caam_jr_remove(pdev);
}
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
@@ -618,6 +658,7 @@ static struct platform_driver caam_jr_driver = {
},
.probe = caam_jr_probe,
.remove = caam_jr_remove,
.shutdown = caam_jr_platform_shutdown,
};
static int __init jr_driver_init(void)
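Summarizing the jr.c refactor above: the old monolithic reset helper is split so the new .shutdown hook can quiesce a ring without tearing it down, and caam_reset_hw_jr() now composes the two phases. A rough sketch of the resulting flow, using the helpers introduced in the hunks:

	/* Phase 1: quiesce - flush/park the ring via caam_jr_flush(),
	 * which wraps caam_jr_stop_processing(dev, JRCR_RESET). */
	err = caam_jr_flush(dev);
	if (err)
		return err;

	/* Phase 2: hard reset - write JRCR_RESET again and poll until
	 * the hardware clears the bit. */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();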


@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>


@@ -10,7 +10,8 @@ ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
sev-dev.o \
tee-dev.o
tee-dev.o \
platform-access.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \


@@ -0,0 +1,215 @@
// SPDX-License-Identifier: GPL-2.0
/*
* AMD Platform Security Processor (PSP) Platform Access interface
*
* Copyright (C) 2023 Advanced Micro Devices, Inc.
*
* Author: Mario Limonciello <mario.limonciello@amd.com>
*
* Some of this code is adapted from drivers/i2c/busses/i2c-designware-amdpsp.c
* developed by Jan Dabros <jsd@semihalf.com> and Copyright (C) 2022 Google Inc.
*
*/
#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include "platform-access.h"
#define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC)
#define DOORBELL_CMDRESP_STS GENMASK(7, 0)
/* Recovery field must be 0 before commands can be sent */
static int check_recovery(u32 __iomem *cmd)
{
return FIELD_GET(PSP_CMDRESP_RECOVERY, ioread32(cmd));
}
static int wait_cmd(u32 __iomem *cmd)
{
u32 tmp, expected;
/* Expect mbox_cmd to be cleared and ready bit to be set by PSP */
expected = FIELD_PREP(PSP_CMDRESP_RESP, 1);
/*
* Check for readiness of PSP mailbox in a tight loop in order to
* proceed as soon as the command has been consumed.
*/
return readl_poll_timeout(cmd, tmp, (tmp & expected), 0,
PSP_CMD_TIMEOUT_US);
}
int psp_check_platform_access_status(void)
{
struct psp_device *psp = psp_get_master_device();
if (!psp || !psp->platform_access_data)
return -ENODEV;
return 0;
}
EXPORT_SYMBOL(psp_check_platform_access_status);
int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
struct psp_request *req)
{
struct psp_device *psp = psp_get_master_device();
u32 __iomem *cmd, *lo, *hi;
struct psp_platform_access_device *pa_dev;
phys_addr_t req_addr;
u32 cmd_reg;
int ret;
if (!psp || !psp->platform_access_data)
return -ENODEV;
pa_dev = psp->platform_access_data;
cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg;
lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg;
hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg;
mutex_lock(&pa_dev->mailbox_mutex);
if (check_recovery(cmd)) {
dev_dbg(psp->dev, "platform mailbox is in recovery\n");
ret = -EBUSY;
goto unlock;
}
if (wait_cmd(cmd)) {
dev_dbg(psp->dev, "platform mailbox is not done processing command\n");
ret = -EBUSY;
goto unlock;
}
/*
* Fill mailbox with address of command-response buffer, which will be
* used for sending i2c requests as well as reading status returned by
* PSP. Use physical address of buffer, since PSP will map this region.
*/
req_addr = __psp_pa(req);
iowrite32(lower_32_bits(req_addr), lo);
iowrite32(upper_32_bits(req_addr), hi);
print_hex_dump_debug("->psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
req->header.payload_size, false);
/* Write command register to trigger processing */
cmd_reg = FIELD_PREP(PSP_CMDRESP_CMD, msg);
iowrite32(cmd_reg, cmd);
if (wait_cmd(cmd)) {
ret = -ETIMEDOUT;
goto unlock;
}
/* Ensure it was triggered by this driver */
if (ioread32(lo) != lower_32_bits(req_addr) ||
ioread32(hi) != upper_32_bits(req_addr)) {
ret = -EBUSY;
goto unlock;
}
/* Store the status in request header for caller to investigate */
cmd_reg = ioread32(cmd);
req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
if (req->header.status) {
ret = -EIO;
goto unlock;
}
print_hex_dump_debug("<-psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
req->header.payload_size, false);
ret = 0;
unlock:
mutex_unlock(&pa_dev->mailbox_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(psp_send_platform_access_msg);
int psp_ring_platform_doorbell(int msg, u32 *result)
{
struct psp_device *psp = psp_get_master_device();
struct psp_platform_access_device *pa_dev;
u32 __iomem *button, *cmd;
int ret, val;
if (!psp || !psp->platform_access_data)
return -ENODEV;
pa_dev = psp->platform_access_data;
button = psp->io_regs + pa_dev->vdata->doorbell_button_reg;
cmd = psp->io_regs + pa_dev->vdata->doorbell_cmd_reg;
mutex_lock(&pa_dev->doorbell_mutex);
if (wait_cmd(cmd)) {
dev_err(psp->dev, "doorbell command not done processing\n");
ret = -EBUSY;
goto unlock;
}
iowrite32(FIELD_PREP(DOORBELL_CMDRESP_STS, msg), cmd);
iowrite32(PSP_DRBL_RING, button);
if (wait_cmd(cmd)) {
ret = -ETIMEDOUT;
goto unlock;
}
val = FIELD_GET(DOORBELL_CMDRESP_STS, ioread32(cmd));
if (val) {
if (result)
*result = val;
ret = -EIO;
goto unlock;
}
ret = 0;
unlock:
mutex_unlock(&pa_dev->doorbell_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(psp_ring_platform_doorbell);
void platform_access_dev_destroy(struct psp_device *psp)
{
struct psp_platform_access_device *pa_dev = psp->platform_access_data;
if (!pa_dev)
return;
mutex_destroy(&pa_dev->mailbox_mutex);
mutex_destroy(&pa_dev->doorbell_mutex);
psp->platform_access_data = NULL;
}
int platform_access_dev_init(struct psp_device *psp)
{
struct device *dev = psp->dev;
struct psp_platform_access_device *pa_dev;
pa_dev = devm_kzalloc(dev, sizeof(*pa_dev), GFP_KERNEL);
if (!pa_dev)
return -ENOMEM;
psp->platform_access_data = pa_dev;
pa_dev->psp = psp;
pa_dev->dev = dev;
pa_dev->vdata = (struct platform_access_vdata *)psp->vdata->platform_access;
mutex_init(&pa_dev->mailbox_mutex);
mutex_init(&pa_dev->doorbell_mutex);
dev_dbg(dev, "platform access enabled\n");
return 0;
}
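For a sense of how a client would drive the new mailbox, here is a hypothetical sketch (the request layout comes from struct psp_request in <linux/psp-platform-access.h> as used above; the message ID and the caller are illustrative, not part of this diff):

	#include <linux/psp-platform-access.h>

	static int send_example_msg(struct psp_request *req)
	{
		int ret;

		/* Bail out early when no PSP with a platform mailbox exists. */
		if (psp_check_platform_access_status())
			return -ENODEV;

		req->header.payload_size = sizeof(*req);
		ret = psp_send_platform_access_msg(PSP_I2C_REQ_BUS_CMD, req);
		if (ret == -EIO)
			/* PSP consumed the command but reported a failure. */
			pr_debug("psp returned status %u\n", req->header.status);

		return ret;
	}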


@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AMD Platform Security Processor (PSP) Platform Access interface
*
* Copyright (C) 2023 Advanced Micro Devices, Inc.
*
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
#ifndef __PSP_PLATFORM_ACCESS_H__
#define __PSP_PLATFORM_ACCESS_H__
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/psp-platform-access.h>
#include "psp-dev.h"
struct psp_platform_access_device {
struct device *dev;
struct psp_device *psp;
struct platform_access_vdata *vdata;
struct mutex mailbox_mutex;
struct mutex doorbell_mutex;
void *platform_access_data;
};
void platform_access_dev_destroy(struct psp_device *psp);
int platform_access_dev_init(struct psp_device *psp);
#endif /* __PSP_PLATFORM_ACCESS_H__ */


@@ -14,6 +14,7 @@
#include "psp-dev.h"
#include "sev-dev.h"
#include "tee-dev.h"
#include "platform-access.h"
struct psp_device *psp_master;
@@ -42,18 +43,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
/* invoke subdevice interrupt handlers */
if (status) {
if (psp->sev_irq_handler)
psp->sev_irq_handler(irq, psp->sev_irq_data, status);
if (psp->tee_irq_handler)
psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
@@ -105,6 +103,17 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
static void psp_init_platform_access(struct psp_device *psp)
{
int ret;
ret = platform_access_dev_init(psp);
if (ret) {
dev_warn(psp->dev, "platform access init failed: %d\n", ret);
return;
}
}
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -121,6 +130,9 @@ static int psp_init(struct psp_device *psp)
return ret;
}
if (psp->vdata->platform_access)
psp_init_platform_access(psp);
return 0;
}
@@ -201,6 +213,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
platform_access_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
if (sp->clear_psp_master_device)
@@ -219,18 +233,6 @@ void psp_clear_sev_irq_handler(struct psp_device *psp)
psp_set_sev_irq_handler(psp, NULL, NULL);
}
void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
void *data)
{
psp->tee_irq_data = data;
psp->tee_irq_handler = handler;
}
void psp_clear_tee_irq_handler(struct psp_device *psp)
{
psp_set_tee_irq_handler(psp, NULL, NULL);
}
struct psp_device *psp_get_master_device(void)
{
struct sp_device *sp = sp_get_psp_master_device();


@@ -17,9 +17,6 @@
#include "sp-dev.h"
#define PSP_CMDRESP_RESP BIT(31)
#define PSP_CMDRESP_ERR_MASK 0xffff
#define MAX_PSP_NAME_LEN 16
extern struct psp_device *psp_master;
@@ -40,11 +37,9 @@ struct psp_device {
psp_irq_handler_t sev_irq_handler;
void *sev_irq_data;
psp_irq_handler_t tee_irq_handler;
void *tee_irq_data;
void *sev_data;
void *tee_data;
void *platform_access_data;
unsigned int capability;
};
@@ -53,10 +48,6 @@ void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
void *data);
void psp_clear_sev_irq_handler(struct psp_device *psp);
void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
void *data);
void psp_clear_tee_irq_handler(struct psp_device *psp);
struct psp_device *psp_get_master_device(void);
#define PSP_CAPABILITY_SEV BIT(0)


@@ -7,6 +7,7 @@
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -24,6 +25,7 @@
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/psp.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
@@ -102,7 +104,7 @@ static void sev_irq_handler(int irq, void *data, unsigned int status)
/* Check if it is SEV command completion: */
reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
if (reg & PSP_CMDRESP_RESP) {
if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
sev->int_rcvd = 1;
wake_up(&sev->int_queue);
}
@@ -346,9 +348,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev->int_rcvd = 0;
reg = cmd;
reg <<= SEV_CMDRESP_CMD_SHIFT;
reg |= SEV_CMDRESP_IOC;
reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
/* wait for command completion */
@@ -366,11 +366,11 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
psp_timeout = psp_cmd_timeout;
if (psp_ret)
*psp_ret = reg & PSP_CMDRESP_ERR_MASK;
*psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);
if (reg & PSP_CMDRESP_ERR_MASK) {
dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
cmd, reg & PSP_CMDRESP_ERR_MASK);
if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
ret = -EIO;
} else {
ret = sev_write_init_ex_file_if_required(cmd);


@@ -25,8 +25,8 @@
#include <linux/miscdevice.h>
#include <linux/capability.h>
#define SEV_CMDRESP_CMD GENMASK(26, 16)
#define SEV_CMD_COMPLETE BIT(1)
#define SEV_CMDRESP_CMD_SHIFT 16
#define SEV_CMDRESP_IOC BIT(0)
struct sev_misc_dev {
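The FIELD_PREP()/FIELD_GET() conversions above rely on the mask encoding both the field's position and width, which is why the separate SEV_CMDRESP_CMD_SHIFT macro could be deleted. A small illustration using the masks from this hunk:

	#include <linux/bitfield.h>

	/* The shift (16) is derived from GENMASK(26, 16) automatically. */
	u32 reg = FIELD_PREP(SEV_CMDRESP_CMD, 0x42) | SEV_CMDRESP_IOC;
	u32 cmd = FIELD_GET(SEV_CMDRESP_CMD, reg);	/* cmd == 0x42 */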


@@ -53,9 +53,19 @@ struct tee_vdata {
const unsigned int ring_rptr_reg;
};
struct platform_access_vdata {
const unsigned int cmdresp_reg;
const unsigned int cmdbuff_addr_lo_reg;
const unsigned int cmdbuff_addr_hi_reg;
const unsigned int doorbell_button_reg;
const unsigned int doorbell_cmd_reg;
};
struct psp_vdata {
const struct sev_vdata *sev;
const struct tee_vdata *tee;
const struct platform_access_vdata *platform_access;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;


@@ -361,6 +361,14 @@ static const struct tee_vdata teev1 = {
.ring_rptr_reg = 0x10554, /* C2PMSG_21 */
};
static const struct platform_access_vdata pa_v1 = {
.cmdresp_reg = 0x10570, /* C2PMSG_28 */
.cmdbuff_addr_lo_reg = 0x10574, /* C2PMSG_29 */
.cmdbuff_addr_hi_reg = 0x10578, /* C2PMSG_30 */
.doorbell_button_reg = 0x10a24, /* C2PMSG_73 */
.doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
};
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
.feature_reg = 0x105fc, /* C2PMSG_63 */
@@ -377,6 +385,7 @@ static const struct psp_vdata pspv2 = {
static const struct psp_vdata pspv3 = {
.tee = &teev1,
.platform_access = &pa_v1,
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
@@ -451,9 +460,9 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
/* Last entry must be zero */
{ 0, }
};


@@ -8,12 +8,13 @@
* Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*/
#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/psp-sev.h>
#include <linux/psp.h>
#include <linux/psp-tee.h>
#include "psp-dev.h"
@@ -69,7 +70,7 @@ static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
while (--nloop) {
*reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
if (*reg & PSP_CMDRESP_RESP)
if (FIELD_GET(PSP_CMDRESP_RESP, *reg))
return 0;
usleep_range(10000, 10100);
@@ -149,9 +150,9 @@ static int tee_init_ring(struct psp_tee_device *tee)
goto free_buf;
}
if (reg & PSP_CMDRESP_ERR_MASK) {
dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
reg & PSP_CMDRESP_ERR_MASK);
if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
dev_err(tee->dev, "tee: ring init command failed (%#010lx)\n",
FIELD_GET(PSP_CMDRESP_STS, reg));
tee_free_ring(tee);
ret = -EIO;
}
@@ -179,9 +180,9 @@ static void tee_destroy_ring(struct psp_tee_device *tee)
ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
if (ret) {
dev_err(tee->dev, "tee: ring destroy command timed out\n");
} else if (reg & PSP_CMDRESP_ERR_MASK) {
dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
reg & PSP_CMDRESP_ERR_MASK);
} else if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
dev_err(tee->dev, "tee: ring destroy command failed (%#010lx)\n",
FIELD_GET(PSP_CMDRESP_STS, reg));
}
free_ring:


@@ -350,9 +350,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Get device resources */
/* First CC registers space */
req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
new_drvdata->cc_base = devm_platform_get_and_ioremap_resource(plat_dev,
0, &req_mem_cc_regs);
if (IS_ERR(new_drvdata->cc_base))
return PTR_ERR(new_drvdata->cc_base);
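devm_platform_get_and_ioremap_resource(), used above, folds the previous two steps into a single call. Conceptually it behaves like this simplified sketch (not the helper's exact source; res_out stands for the third argument, here &req_mem_cc_regs):

	/* Roughly what the helper does internally: */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (res_out)
		*res_out = res;	/* hand the resource back to the caller */
	return base;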


@@ -879,7 +879,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
static void hifn_init_dma(struct hifn_device *dev)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
u32 dptr = dev->desc_dma;
int i;
@@ -1072,7 +1072,7 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
u8 *buf, unsigned dlen, unsigned slen,
u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
struct hifn_crypt_command *cry_cmd;
u8 *buf_pos = buf;
u16 cmd_len;
@@ -1113,7 +1113,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
struct hifn_context *ctx, struct hifn_request_context *rctx,
void *priv, unsigned int nbytes)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
int cmd_len, sa_idx;
u8 *buf, *buf_pos;
u16 mask;
@@ -1231,7 +1231,7 @@ err_out:
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
unsigned int offset, unsigned int size, int last)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
int idx;
dma_addr_t addr;
@@ -1264,7 +1264,7 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
static void hifn_setup_res_desc(struct hifn_device *dev)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
HIFN_D_VALID | HIFN_D_LAST);
@@ -1290,7 +1290,7 @@ static void hifn_setup_res_desc(struct hifn_device *dev)
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
unsigned offset, unsigned size, int last)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
int idx;
dma_addr_t addr;
@@ -1710,7 +1710,7 @@ static void hifn_process_ready(struct skcipher_request *req, int error)
static void hifn_clear_rings(struct hifn_device *dev, int error)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
int i, u;
dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
@@ -1784,7 +1784,7 @@ static void hifn_work(struct work_struct *work)
spin_lock_irqsave(&dev->lock, flags);
if (dev->active == 0) {
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
dev->flags &= ~HIFN_FLAG_CMD_BUSY;
@@ -1815,7 +1815,7 @@ static void hifn_work(struct work_struct *work)
if (reset) {
if (++dev->reset >= 5) {
int i;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
dev_info(&dev->pdev->dev,
"r: %08x, active: %d, started: %d, "
@@ -1848,8 +1848,8 @@ static void hifn_work(struct work_struct *work)
static irqreturn_t hifn_interrupt(int irq, void *data)
{
struct hifn_device *dev = (struct hifn_device *)data;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_device *dev = data;
struct hifn_dma *dma = dev->desc_virt;
u32 dmacsr, restart;
dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
@@ -1914,7 +1914,7 @@ static void hifn_flush(struct hifn_device *dev)
unsigned long flags;
struct crypto_async_request *async_req;
struct skcipher_request *req;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_dma *dma = dev->desc_virt;
int i;
for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
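All the hifn hunks above make the same mechanical change: in C a void * converts to any object pointer type implicitly, so the explicit casts were noise. For example:

	struct hifn_dma *dma;

	dma = (struct hifn_dma *)dev->desc_virt;	/* before: redundant cast */
	dma = dev->desc_virt;				/* after: implicit conversion */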


@@ -82,3 +82,10 @@ config CRYPTO_DEV_HISI_TRNG
select CRYPTO_RNG
help
Support for HiSilicon TRNG Driver.
config CRYPTO_DEV_HISTB_TRNG
tristate "Support for HiSTB TRNG Driver"
depends on ARCH_HISI || COMPILE_TEST
select HW_RANDOM
help
Support for HiSTB TRNG Driver.


@@ -5,4 +5,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
hisi_qm-objs = qm.o sgl.o debugfs.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
obj-y += trng/


@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
