Merge tag 'v6.8-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Add incremental lskcipher/skcipher processing

  Algorithms:
   - Remove SHA1 from drbg
   - Remove CFB and OFB

  Drivers:
   - Add comp high perf mode configuration in hisilicon/zip
   - Add support for 420xx devices in qat
   - Add IAA Compression Accelerator driver"

* tag 'v6.8-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (172 commits)
  crypto: iaa - Account for cpu-less numa nodes
  crypto: scomp - fix req->dst buffer overflow
  crypto: sahara - add support for crypto_engine
  crypto: sahara - remove error message for bad aes request size
  crypto: sahara - remove unnecessary NULL assignments
  crypto: sahara - remove 'active' flag from sahara_aes_reqctx struct
  crypto: sahara - use dev_err_probe()
  crypto: sahara - use devm_clk_get_enabled()
  crypto: sahara - use BIT() macro
  crypto: sahara - clean up macro indentation
  crypto: sahara - do not resize req->src when doing hash operations
  crypto: sahara - fix processing hash requests with req->nbytes < sg->length
  crypto: sahara - improve error handling in sahara_sha_process()
  crypto: sahara - fix wait_for_completion_timeout() error handling
  crypto: sahara - fix ahash reqsize
  crypto: sahara - handle zero-length aes requests
  crypto: skcipher - remove excess kerneldoc members
  crypto: shash - remove excess kerneldoc members
  crypto: qat - generate dynamically arbiter mappings
  crypto: qat - add support for ring pair level telemetry
  ...

This commit is contained in: commit 0cb552aa97
Documentation/ABI/testing/debugfs-driver-qat_telemetry (new file, 228 lines)
@@ -0,0 +1,228 @@

What:           /sys/kernel/debug/qat_<device>_<BDF>/telemetry/control
Date:           March 2024
KernelVersion:  6.8
Contact:        qat-linux@intel.com
Description:    (RW) Enables/disables the reporting of telemetry metrics.

                Allowed values to write:
                ========================
                * 0: disable telemetry
                * 1: enable telemetry
                * 2, 3, 4: enable telemetry and calculate minimum, maximum
                  and average for each counter over 2, 3 or 4 samples

                Returned values:
                ================
                * 1-4: telemetry is enabled and running
                * 0: telemetry is disabled

                Example.

                Writing '3' to this file starts the collection of
                telemetry metrics. Samples are collected every second and
                stored in a circular buffer of size 3. These values are then
                used to calculate the minimum, maximum and average for each
                counter. After enabling, counters can be retrieved through
                the ``device_data`` file::

                  echo 3 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control

                Writing '0' to this file stops the collection of telemetry
                metrics::

                  echo 0 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control

                This attribute is only available for qat_4xxx devices.

What:           /sys/kernel/debug/qat_<device>_<BDF>/telemetry/device_data
Date:           March 2024
KernelVersion:  6.8
Contact:        qat-linux@intel.com
Description:    (RO) Reports device telemetry counters.
                Reads report metrics about performance and utilization of
                a QAT device:

                ======================= ========================================
                Field                   Description
                ======================= ========================================
                sample_cnt              number of acquisitions of telemetry data
                                        from the device. Reads are performed
                                        every 1000 ms.
                pci_trans_cnt           number of PCIe partial transactions
                max_rd_lat              maximum logged read latency [ns] (could
                                        be any read operation)
                rd_lat_acc_avg          average read latency [ns]
                max_gp_lat              max get to put latency [ns] (only takes
                                        samples for AE0)
                gp_lat_acc_avg          average get to put latency [ns]
                bw_in                   PCIe, write bandwidth [Mbps]
                bw_out                  PCIe, read bandwidth [Mbps]
                at_page_req_lat_avg     Address Translator (AT), average page
                                        request latency [ns]
                at_trans_lat_avg        AT, average page translation latency [ns]
                at_max_tlb_used         AT, maximum uTLB used
                util_cpr<N>             utilization of Compression slice N [%]
                exec_cpr<N>             execution count of Compression slice N
                util_xlt<N>             utilization of Translator slice N [%]
                exec_xlt<N>             execution count of Translator slice N
                util_dcpr<N>            utilization of Decompression slice N [%]
                exec_dcpr<N>            execution count of Decompression slice N
                util_pke<N>             utilization of PKE N [%]
                exec_pke<N>             execution count of PKE N
                util_ucs<N>             utilization of UCS slice N [%]
                exec_ucs<N>             execution count of UCS slice N
                util_wat<N>             utilization of Wireless Authentication
                                        slice N [%]
                exec_wat<N>             execution count of Wireless Authentication
                                        slice N
                util_wcp<N>             utilization of Wireless Cipher slice N [%]
                exec_wcp<N>             execution count of Wireless Cipher slice N
                util_cph<N>             utilization of Cipher slice N [%]
                exec_cph<N>             execution count of Cipher slice N
                util_ath<N>             utilization of Authentication slice N [%]
                exec_ath<N>             execution count of Authentication slice N
                ======================= ========================================

                The telemetry report file can be read with the following command::

                  cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/device_data

                If ``control`` is set to 1, only the current values of the
                counters are displayed::

                  <counter_name> <current>

                If ``control`` is 2, 3 or 4, counters are displayed in the
                following format::

                  <counter_name> <current> <min> <max> <avg>

                If a device lacks a specific accelerator, the corresponding
                attribute is not reported.

                This attribute is only available for qat_4xxx devices.
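
                The two files combine naturally into a small monitoring
                snippet. For example, a sketch that enables 4-sample
                averaging and then extracts one counter from the report
                (reusing the example BDF from above; adjust for your
                device)::

                  TELEM=/sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry
                  echo 4 > "$TELEM/control"
                  sleep 5   # let the sample buffer fill
                  awk '$1 == "gp_lat_acc_avg" { print "current:", $2, "min:", $3, "max:", $4, "avg:", $5 }' \
                      "$TELEM/device_data"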
What:           /sys/kernel/debug/qat_<device>_<BDF>/telemetry/rp_<A/B/C/D>_data
Date:           March 2024
KernelVersion:  6.8
Contact:        qat-linux@intel.com
Description:    (RW) Selects up to 4 Ring Pairs (RP) to monitor, one per file,
                and reports telemetry counters related to each.

                Allowed values to write:
                ========================
                * 0 to ``<num_rps - 1>``:
                  Ring pair to be monitored. The value of ``num_rps`` can be
                  retrieved through ``/sys/bus/pci/devices/<BDF>/qat/num_rps``.
                  See Documentation/ABI/testing/sysfs-driver-qat.

                Reads report metrics about performance and utilization of
                the selected RP:

                ======================= ========================================
                Field                   Description
                ======================= ========================================
                sample_cnt              number of acquisitions of telemetry data
                                        from the device. Reads are performed
                                        every 1000 ms
                rp_num                  RP number associated with slot <A/B/C/D>
                service_type            service associated with the RP
                pci_trans_cnt           number of PCIe partial transactions
                gp_lat_acc_avg          average get to put latency [ns]
                bw_in                   PCIe, write bandwidth [Mbps]
                bw_out                  PCIe, read bandwidth [Mbps]
                at_glob_devtlb_hit      Message descriptor DevTLB hit rate
                at_glob_devtlb_miss     Message descriptor DevTLB miss rate
                tl_at_payld_devtlb_hit  Payload DevTLB hit rate
                tl_at_payld_devtlb_miss Payload DevTLB miss rate
                ======================= ========================================

                Example.

                Writing the value '32' to the file ``rp_C_data`` starts the
                collection of telemetry metrics for ring pair 32::

                  echo 32 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data

                Once a ring pair is selected, statistics can be read by
                accessing the file::

                  cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data

                If ``control`` is set to 1, only the current values of the
                counters are displayed::

                  <counter_name> <current>

                If ``control`` is 2, 3 or 4, counters are displayed in the
                following format::

                  <counter_name> <current> <min> <max> <avg>

                On QAT GEN4 devices there are 64 RPs on a PF, so the allowed
                values are 0..63. This number is absolute to the device.
                If Virtual Functions (VF) are used, the ring pair number can
                be derived from the Bus, Device, Function of the VF:

                ============ ====== ====== ====== ======
                PCI BDF/VF    RP0    RP1    RP2    RP3
                ============ ====== ====== ====== ======
                0000:6b:0.1  RP 0   RP 1   RP 2   RP 3
                0000:6b:0.2  RP 4   RP 5   RP 6   RP 7
                0000:6b:0.3  RP 8   RP 9   RP 10  RP 11
                0000:6b:0.4  RP 12  RP 13  RP 14  RP 15
                0000:6b:0.5  RP 16  RP 17  RP 18  RP 19
                0000:6b:0.6  RP 20  RP 21  RP 22  RP 23
                0000:6b:0.7  RP 24  RP 25  RP 26  RP 27
                0000:6b:1.0  RP 28  RP 29  RP 30  RP 31
                0000:6b:1.1  RP 32  RP 33  RP 34  RP 35
                0000:6b:1.2  RP 36  RP 37  RP 38  RP 39
                0000:6b:1.3  RP 40  RP 41  RP 42  RP 43
                0000:6b:1.4  RP 44  RP 45  RP 46  RP 47
                0000:6b:1.5  RP 48  RP 49  RP 50  RP 51
                0000:6b:1.6  RP 52  RP 53  RP 54  RP 55
                0000:6b:1.7  RP 56  RP 57  RP 58  RP 59
                0000:6b:2.0  RP 60  RP 61  RP 62  RP 63
                ============ ====== ====== ====== ======

                The mapping is only valid for the BDFs of VFs on the host.

                The service provided on a ring-pair varies depending on the
                configuration. The configuration for a given device can be
                queried and set using ``cfg_services``.
                See Documentation/ABI/testing/sysfs-driver-qat for details.

                The following table reports how ring pairs are mapped to VFs
                on the PF 0000:6b:0.0 configured for `sym;asym` or `asym;sym`:

                =========== ============ =========== ============ ===========
                PCI BDF/VF  RP0/service  RP1/service RP2/service  RP3/service
                =========== ============ =========== ============ ===========
                0000:6b:0.1 RP 0 asym    RP 1 sym    RP 2 asym    RP 3 sym
                0000:6b:0.2 RP 4 asym    RP 5 sym    RP 6 asym    RP 7 sym
                0000:6b:0.3 RP 8 asym    RP 9 sym    RP10 asym    RP11 sym
                ...         ...          ...         ...          ...
                =========== ============ =========== ============ ===========

                All VFs follow the same pattern.

                The following table reports how ring pairs are mapped to VFs on
                the PF 0000:6b:0.0 configured for `dc`:

                =========== ============ =========== ============ ===========
                PCI BDF/VF  RP0/service  RP1/service RP2/service  RP3/service
                =========== ============ =========== ============ ===========
                0000:6b:0.1 RP 0 dc      RP 1 dc     RP 2 dc      RP 3 dc
                0000:6b:0.2 RP 4 dc      RP 5 dc     RP 6 dc      RP 7 dc
                0000:6b:0.3 RP 8 dc      RP 9 dc     RP10 dc      RP11 dc
                ...         ...          ...         ...          ...
                =========== ============ =========== ============ ===========

                The mapping of a RP to a service can be retrieved using
                ``rp2srv`` from sysfs.
                See Documentation/ABI/testing/sysfs-driver-qat for details.

                This attribute is only available for qat_4xxx devices.
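
                The table above encodes a simple rule: each VF owns four
                consecutive ring pairs, in VF enumeration order. A
                hypothetical shell helper (``vf_index`` counts 0000:6b:0.1
                as 1, 0000:6b:0.2 as 2, and so on)::

                  vf_index=9                        # 0000:6b:1.1 is the 9th VF
                  base=$(( (vf_index - 1) * 4 ))
                  echo "RP slots A/B/C/D: $base $(( base + 1 )) $(( base + 2 )) $(( base + 3 ))"
                  # prints: RP slots A/B/C/D: 32 33 34 35, matching the row for 0000:6b:1.1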
@@ -101,7 +101,7 @@ What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/status
 Date:          Apr 2020
 Contact:       linux-crypto@vger.kernel.org
 Description:   Dump the status of the QM.
-               Four states: initiated, started, stopped and closed.
+               Two states: work, stop.
                Available for both PF and VF, and take no other effect on HPRE.

 What:          /sys/kernel/debug/hisi_hpre/<bdf>/qm/diff_regs

@@ -81,7 +81,7 @@ What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/status
 Date:          Apr 2020
 Contact:       linux-crypto@vger.kernel.org
 Description:   Dump the status of the QM.
-               Four states: initiated, started, stopped and closed.
+               Two states: work, stop.
                Available for both PF and VF, and take no other effect on SEC.

 What:          /sys/kernel/debug/hisi_sec2/<bdf>/qm/diff_regs

@@ -94,7 +94,7 @@ What: /sys/kernel/debug/hisi_zip/<bdf>/qm/status
 Date:          Apr 2020
 Contact:       linux-crypto@vger.kernel.org
 Description:   Dump the status of the QM.
-               Four states: initiated, started, stopped and closed.
+               Two states: work, stop.
                Available for both PF and VF, and take no other effect on ZIP.

 What:          /sys/kernel/debug/hisi_zip/<bdf>/qm/diff_regs
Documentation/crypto/device_drivers/index.rst (new file, 9 lines)
@@ -0,0 +1,9 @@

.. SPDX-License-Identifier: GPL-2.0

Hardware Device Driver Specific Documentation
---------------------------------------------

.. toctree::
   :maxdepth: 1

   octeontx2
Documentation/crypto/device_drivers/octeontx2.rst (new file, 25 lines)
@@ -0,0 +1,25 @@

.. SPDX-License-Identifier: GPL-2.0

=========================
octeontx2 devlink support
=========================

This document describes the devlink features implemented by the
``octeontx2 CPT`` device drivers.

Parameters
==========

The ``octeontx2`` driver implements the following driver-specific
parameters.

.. list-table:: Driver-specific parameters implemented
   :widths: 5 5 5 85

   * - Name
     - Type
     - Mode
     - Description
   * - ``t106_mode``
     - u8
     - runtime
     - Used to configure CN10KA B0/CN10KB CPT to work as CN10KA A0/A1.
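
Since ``t106_mode`` is a runtime parameter, it can be set and read back
with the standard devlink CLI. A minimal sketch (the ``pci/0002:20:00.0``
device handle is an assumption; substitute your CPT PF's address)::

   devlink dev param set pci/0002:20:00.0 name t106_mode value 1 cmode runtime
   devlink dev param show pci/0002:20:00.0 name t106_mode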
@@ -28,3 +28,4 @@ for cryptographic use cases, as well as programming examples.
    api
    api-samples
    descore-readme
+   device_drivers/index
Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml (new file, 86 lines)
@@ -0,0 +1,86 @@

# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/inside-secure,safexcel.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Inside Secure SafeXcel cryptographic engine

maintainers:
  - Antoine Tenart <atenart@kernel.org>

properties:
  compatible:
    oneOf:
      - const: inside-secure,safexcel-eip197b
      - const: inside-secure,safexcel-eip197d
      - const: inside-secure,safexcel-eip97ies
      - const: inside-secure,safexcel-eip197
        description: Equivalent of inside-secure,safexcel-eip197b
        deprecated: true
      - const: inside-secure,safexcel-eip97
        description: Equivalent of inside-secure,safexcel-eip97ies
        deprecated: true

  reg:
    maxItems: 1

  interrupts:
    maxItems: 6

  interrupt-names:
    items:
      - const: ring0
      - const: ring1
      - const: ring2
      - const: ring3
      - const: eip
      - const: mem

  clocks:
    minItems: 1
    maxItems: 2

  clock-names:
    minItems: 1
    items:
      - const: core
      - const: reg

required:
  - reg
  - interrupts
  - interrupt-names

allOf:
  - if:
      properties:
        clocks:
          minItems: 2
    then:
      properties:
        clock-names:
          minItems: 2
      required:
        - clock-names

additionalProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/interrupt-controller/irq.h>

    crypto@800000 {
        compatible = "inside-secure,safexcel-eip197b";
        reg = <0x800000 0x200000>;
        interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "ring0", "ring1", "ring2", "ring3", "eip", "mem";
        clocks = <&cpm_syscon0 1 26>;
        clock-names = "core";
    };
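
A converted binding like this can be exercised with the kernel's dt-schema
tooling; a sketch, assuming a configured kernel tree with the dtschema
package installed::

    make dt_binding_check \
        DT_SCHEMA_FILES=Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml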
@@ -1,40 +0,0 @@ (deleted file)

Inside Secure SafeXcel cryptographic engine

Required properties:
- compatible: Should be "inside-secure,safexcel-eip197b",
              "inside-secure,safexcel-eip197d" or
              "inside-secure,safexcel-eip97ies".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt numbers for the rings and engine.
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".

Optional properties:
- clocks: Reference to the crypto engine clocks, the second clock is
          needed for the Armada 7K/8K SoCs.
- clock-names: mandatory if there is a second clock, in this case the
               name must be "core" for the first clock and "reg" for
               the second one.

Backward compatibility:
Two compatibles are kept for backward compatibility, but shouldn't be used for
new submissions:
- "inside-secure,safexcel-eip197" is equivalent to
  "inside-secure,safexcel-eip197b".
- "inside-secure,safexcel-eip97" is equivalent to
  "inside-secure,safexcel-eip97ies".

Example:

crypto: crypto@800000 {
	compatible = "inside-secure,safexcel-eip197b";
	reg = <0x800000 0x200000>;
	interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3",
			  "eip";
	clocks = <&cpm_syscon0 1 26>;
};
@@ -16,6 +16,7 @@ properties:
           - qcom,sa8775p-inline-crypto-engine
           - qcom,sm8450-inline-crypto-engine
           - qcom,sm8550-inline-crypto-engine
+          - qcom,sm8650-inline-crypto-engine
       - const: qcom,inline-crypto-engine

   reg:
@@ -21,6 +21,7 @@ properties:
           - qcom,sc7280-trng
           - qcom,sm8450-trng
           - qcom,sm8550-trng
+          - qcom,sm8650-trng
       - const: qcom,trng

   reg:
@@ -44,10 +44,12 @@ properties:

   - items:
       - enum:
           - qcom,sc7280-qce
           - qcom,sm8250-qce
           - qcom,sm8350-qce
           - qcom,sm8450-qce
           - qcom,sm8550-qce
           - qcom,sm8650-qce
       - const: qcom,sm8150-qce
       - const: qcom,qce

@@ -96,6 +98,7 @@ allOf:
             - qcom,crypto-v5.4
             - qcom,ipq6018-qce
             - qcom,ipq8074-qce
             - qcom,ipq9574-qce
             - qcom,msm8996-qce
             - qcom,sdm845-qce
     then:

@@ -129,6 +132,17 @@ allOf:
         - clocks
         - clock-names

+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,sm8150-qce
+    then:
+      properties:
+        clocks: false
+        clock-names: false
+
 required:
   - compatible
   - reg
@@ -11,7 +11,11 @@ maintainers:

 properties:
   compatible:
-    const: starfive,jh7110-trng
+    oneOf:
+      - items:
+          - const: starfive,jh8100-trng
+          - const: starfive,jh7110-trng
+      - const: starfive,jh7110-trng

   reg:
     maxItems: 1
Documentation/driver-api/crypto/iaa/iaa-crypto.rst (new file, 824 lines)
@@ -0,0 +1,824 @@

.. SPDX-License-Identifier: GPL-2.0

=========================================
IAA Compression Accelerator Crypto Driver
=========================================

Tom Zanussi <tom.zanussi@linux.intel.com>

The IAA crypto driver supports compression/decompression compatible
with the DEFLATE compression standard described in RFC 1951, which is
the compression/decompression algorithm exported by this module.

The IAA hardware spec can be found here:

https://cdrdv2.intel.com/v1/dl/getContent/721858

The iaa_crypto driver is designed to work as a layer underneath
higher-level compression devices such as zswap.

Users can select IAA compress/decompress acceleration by specifying
one of the supported IAA compression algorithms in whatever facility
allows compression algorithms to be selected.

For example, a zswap device can select the IAA 'fixed' mode
represented by selecting the 'deflate-iaa' crypto compression
algorithm::

    # echo deflate-iaa > /sys/module/zswap/parameters/compressor

This will tell zswap to use the IAA 'fixed' compression mode for all
compresses and decompresses.

Currently, there is only one compression mode available: 'fixed'
mode.

The 'fixed' compression mode implements the compression scheme
specified by RFC 1951 and is given the crypto algorithm name
'deflate-iaa'. (Because the IAA hardware has a 4k history-window
limitation, only buffers <= 4k, or that have been compressed using a
<= 4k history window, are technically compliant with the deflate spec,
which allows for a window of up to 32k. Because of this limitation,
the IAA fixed mode deflate algorithm is given its own algorithm name
rather than simply 'deflate').


Config options and other setup
==============================

The IAA crypto driver is available via menuconfig using the following
path::

    Cryptographic API -> Hardware crypto devices -> Support for Intel(R) IAA Compression Accelerator

In the configuration file, the option is called
CONFIG_CRYPTO_DEV_IAA_CRYPTO.

The IAA crypto driver also supports statistics, which are available
via menuconfig using the following path::

    Cryptographic API -> Hardware crypto devices -> Support for Intel(R) IAA Compression -> Enable Intel(R) IAA Compression Accelerator Statistics

In the configuration file, the option is called
CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS.

The following config options should also be enabled::

    CONFIG_IRQ_REMAP=y
    CONFIG_INTEL_IOMMU=y
    CONFIG_INTEL_IOMMU_SVM=y
    CONFIG_PCI_ATS=y
    CONFIG_PCI_PRI=y
    CONFIG_PCI_PASID=y
    CONFIG_INTEL_IDXD=m
    CONFIG_INTEL_IDXD_SVM=y
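
Whether a running kernel was built with those options can be checked
quickly before going further; a sketch (the /boot config path is an
assumption that matches most distros)::

    grep -E 'CONFIG_(IRQ_REMAP|INTEL_IOMMU(_SVM)?|PCI_(ATS|PRI|PASID)|INTEL_IDXD(_SVM)?)=' \
        /boot/config-"$(uname -r)"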
IAA is one of the first Intel accelerator IPs that can work in
conjunction with the Intel IOMMU. There are multiple modes that exist
for testing. Based on IOMMU configuration, there are 3 modes::

    - Scalable
    - Legacy
    - No IOMMU


Scalable mode
-------------

Scalable mode supports Shared Virtual Memory (SVM or SVA). It is
entered when using the kernel boot commandline::

    intel_iommu=on,sm_on

with VT-d turned on in BIOS.

With scalable mode, both shared and dedicated workqueues are available
for use.

For scalable mode, the following BIOS settings should be enabled::

    Socket Configuration > IIO Configuration > Intel VT for Directed I/O (VT-d) > Intel VT for Directed I/O

    Socket Configuration > IIO Configuration > PCIe ENQCMD > ENQCMDS


Legacy mode
-----------

Legacy mode is entered when using the kernel boot commandline::

    intel_iommu=off

or VT-d is not turned on in BIOS.

If you have booted into Linux and are not sure whether VT-d is on, run
"dmesg | grep -i dmar". If you don't see a number of DMAR devices
enumerated, most likely VT-d is not on.

With legacy mode, only dedicated workqueues are available for use.


No IOMMU mode
-------------

No IOMMU mode is entered when using the kernel boot commandline::

    iommu=off

With no IOMMU mode, only dedicated workqueues are available for use.
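
Which of the three modes a booted system is actually in can be read off
the boot options and the DMAR enumeration; a small sketch::

    tr ' ' '\n' < /proc/cmdline | grep -E '^(intel_)?iommu' || echo "no iommu option set"
    dmesg | grep -ic dmar    # 0 suggests VT-d is off or absent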
Usage
=====

accel-config
------------

When loaded, the iaa_crypto driver automatically creates a default
configuration and enables it, and assigns default driver attributes.
If a different configuration or set of driver attributes is required,
the user must first disable the IAA devices and workqueues, reset the
configuration, and then re-register the deflate-iaa algorithm with the
crypto subsystem by removing and reinserting the iaa_crypto module.

The :ref:`iaa_disable_script` in the 'Use Cases'
section below can be used to disable the default configuration.

See :ref:`iaa_default_config` below for details of the default
configuration.

More likely than not, however, and because of the complexity and
configurability of the accelerator devices, the user will want to
configure the device and manually enable the desired devices and
workqueues.

The userspace tool to help doing that is called accel-config. Using
accel-config to configure a device or load a previously saved config
is highly recommended. The device can be controlled via sysfs
directly, but this comes with the warning that you should do so ONLY
if you know exactly what you are doing. The following sections will
not cover the sysfs interface but assume you will be using
accel-config.

The :ref:`iaa_sysfs_config` section in the appendix below can be
consulted for the sysfs interface details if interested.

The accel-config tool along with instructions for building it can be
found here:

  https://github.com/intel/idxd-config/#readme

Typical usage
-------------

In order for the iaa_crypto module to actually do any
compression/decompression work on behalf of a facility, one or more
IAA workqueues need to be bound to the iaa_crypto driver.

For instance, here's an example of configuring an IAA workqueue and
binding it to the iaa_crypto driver (note that device names are
specified as 'iax' rather than 'iaa' - this is because upstream still
has the old 'iax' device naming in place)::

    # configure wq1.0

    accel-config config-wq --group-id=0 --mode=dedicated --type=kernel --name="iaa_crypto" --driver_name="crypto" iax1/wq1.0

    # enable IAA device iax1

    accel-config enable-device iax1

    # enable wq1.0 on IAX device iax1

    accel-config enable-wq iax1/wq1.0
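
Once bound, the device and workqueue state can be read back to confirm
the configuration took effect (a sketch, using the same device names as
the example above)::

    cat /sys/bus/dsa/devices/iax1/state        # should print 'enabled'
    cat /sys/bus/dsa/devices/iax1/wq1.0/state  # should print 'enabled'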
Whenever a new workqueue is bound to or unbound from the iaa_crypto
driver, the available workqueues are 'rebalanced' such that work
submitted from a particular CPU is given to the most appropriate
workqueue available. Current best practice is to configure and bind
at least one workqueue for each IAA device, but as long as there is at
least one workqueue configured and bound to any IAA device in the
system, the iaa_crypto driver will work, albeit most likely not as
efficiently.

The IAA crypto algorithm is operational and compression and
decompression operations are fully enabled following the successful
binding of the first IAA workqueue to the iaa_crypto driver.

Similarly, the IAA crypto algorithm is not operational and compression
and decompression operations are disabled following the unbinding of
the last IAA workqueue from the iaa_crypto driver.

As a result, the IAA crypto algorithms and thus the IAA hardware are
only available when one or more workqueues are bound to the iaa_crypto
driver.

When there are no IAA workqueues bound to the driver, the IAA crypto
algorithms can be unregistered by removing the module.


Driver attributes
-----------------

There are a couple of user-configurable driver attributes that can be
used to configure various modes of operation. They're listed below,
along with their default values. To set any of these attributes, echo
the appropriate values to the attribute file located under
/sys/bus/dsa/drivers/crypto/

The attribute settings at the time the IAA algorithms are registered
are captured in each algorithm's crypto_ctx and used for all compresses
and decompresses when using that algorithm.

The available attributes are:

  - verify_compress

    Toggle compression verification. If set, each compress will be
    internally decompressed and the contents verified, returning error
    codes if unsuccessful. This can be toggled with 0/1::

      echo 0 > /sys/bus/dsa/drivers/crypto/verify_compress

    The default setting is '1' - verify all compresses.

  - sync_mode

    Select the mode used to wait for completion of each compress
    and decompress operation.

    The crypto async interface support implemented by iaa_crypto
    provides an implementation that satisfies the interface but does
    so in a synchronous manner - it fills and submits the IDXD
    descriptor and then loops around waiting for it to complete before
    returning. This isn't a problem at the moment, since all existing
    callers (e.g. zswap) wrap any asynchronous callees in a
    synchronous wrapper anyway.

    The iaa_crypto driver does however provide true asynchronous
    support for callers that can make use of it. In this mode, it
    fills and submits the IDXD descriptor, then returns immediately
    with -EINPROGRESS. The caller can then either poll for completion
    itself, which requires specific code in the caller (which
    currently nothing in the upstream kernel implements), or go to
    sleep and wait for an interrupt signaling completion. This latter
    mode is supported by current users in the kernel such as zswap via
    synchronous wrappers. Although it is supported, this mode is
    significantly slower than the synchronous mode that does the
    polling in the iaa_crypto driver mentioned previously.

    This mode can be enabled by writing 'async_irq' to the sync_mode
    iaa_crypto driver attribute::

      echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode

    Async mode without interrupts (caller must poll) can be enabled by
    writing 'async' to it::

      echo async > /sys/bus/dsa/drivers/crypto/sync_mode

    The mode that does the polling in the iaa_crypto driver can be
    enabled by writing 'sync' to it::

      echo sync > /sys/bus/dsa/drivers/crypto/sync_mode

    The default mode is 'sync'.
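
Both attributes can also be read back from the same files to confirm
the active settings; a sketch::

    for attr in sync_mode verify_compress; do
        printf '%s = %s\n' "$attr" "$(cat /sys/bus/dsa/drivers/crypto/$attr)"
    done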
.. _iaa_default_config:

IAA Default Configuration
-------------------------

When the iaa_crypto driver is loaded, each IAA device has a single
work queue configured for it, with the following attributes::

    mode        "dedicated"
    threshold   0
    size        Total WQ Size from WQCAP
    priority    10
    type        IDXD_WQT_KERNEL
    group       0
    name        "iaa_crypto"
    driver_name "crypto"

The devices and workqueues are also enabled and therefore the driver
is ready to be used without any additional configuration.

The default driver attributes in effect when the driver is loaded are::

    sync_mode       "sync"
    verify_compress 1

In order to change either the device/work queue or driver attributes,
the enabled devices and workqueues must first be disabled. In order
to have the new configuration applied to the deflate-iaa crypto
algorithm, it needs to be re-registered by removing and reinserting
the iaa_crypto module. The :ref:`iaa_disable_script` in the 'Use
Cases' section below can be used to disable the default configuration.

Statistics
==========

If the optional debugfs statistics support is enabled, the IAA crypto
driver will generate statistics which can be accessed in debugfs at::

    # ls -al /sys/kernel/debug/iaa-crypto/
    total 0
    drwxr-xr-x  2 root root 0 Mar  3 09:35 .
    drwx------ 47 root root 0 Mar  3 09:35 ..
    -rw-r--r--  1 root root 0 Mar  3 09:35 max_acomp_delay_ns
    -rw-r--r--  1 root root 0 Mar  3 09:35 max_adecomp_delay_ns
    -rw-r--r--  1 root root 0 Mar  3 09:35 max_comp_delay_ns
    -rw-r--r--  1 root root 0 Mar  3 09:35 max_decomp_delay_ns
    -rw-r--r--  1 root root 0 Mar  3 09:35 stats_reset
    -rw-r--r--  1 root root 0 Mar  3 09:35 total_comp_bytes_out
    -rw-r--r--  1 root root 0 Mar  3 09:35 total_comp_calls
    -rw-r--r--  1 root root 0 Mar  3 09:35 total_decomp_bytes_in
    -rw-r--r--  1 root root 0 Mar  3 09:35 total_decomp_calls
    -rw-r--r--  1 root root 0 Mar  3 09:35 wq_stats

Most of the above statistics are self-explanatory. The wq_stats file
shows per-wq stats, a set for each iaa device and wq in addition to
some global stats::

    # cat wq_stats
    global stats:
      total_comp_calls: 100
      total_decomp_calls: 100
      total_comp_bytes_out: 22800
      total_decomp_bytes_in: 22800
      total_completion_einval_errors: 0
      total_completion_timeout_errors: 0
      total_completion_comp_buf_overflow_errors: 0

    iaa device:
      id: 1
      n_wqs: 1
      comp_calls: 0
      comp_bytes: 0
      decomp_calls: 0
      decomp_bytes: 0
      wqs:
        name: iaa_crypto
        comp_calls: 0
        comp_bytes: 0
        decomp_calls: 0
        decomp_bytes: 0

    iaa device:
      id: 3
      n_wqs: 1
      comp_calls: 0
      comp_bytes: 0
      decomp_calls: 0
      decomp_bytes: 0
      wqs:
        name: iaa_crypto
        comp_calls: 0
        comp_bytes: 0
        decomp_calls: 0
        decomp_bytes: 0

    iaa device:
      id: 5
      n_wqs: 1
      comp_calls: 100
      comp_bytes: 22800
      decomp_calls: 100
      decomp_bytes: 22800
      wqs:
        name: iaa_crypto
        comp_calls: 100
        comp_bytes: 22800
        decomp_calls: 100
        decomp_bytes: 22800

Writing 0 to 'stats_reset' resets all the stats, including the
per-device and per-wq stats::

    # echo 0 > stats_reset
    # cat wq_stats
    global stats:
      total_comp_calls: 0
      total_decomp_calls: 0
      total_comp_bytes_out: 0
      total_decomp_bytes_in: 0
      total_completion_einval_errors: 0
      total_completion_timeout_errors: 0
      total_completion_comp_buf_overflow_errors: 0
    ...
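
The plain counter files also lend themselves to simple derived metrics.
For example, a sketch deriving the average compressed size per compress
call from the global counters::

    stats=/sys/kernel/debug/iaa-crypto
    calls=$(cat "$stats/total_comp_calls")
    bytes=$(cat "$stats/total_comp_bytes_out")
    [ "$calls" -gt 0 ] && echo "avg compressed bytes per call: $(( bytes / calls ))"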
Use cases
=========

Simple zswap test
-----------------

For this example, the kernel should be configured according to the
dedicated mode options described above, and zswap should be enabled as
well::

    CONFIG_ZSWAP=y

This is a simple test that uses iaa_compress as the compressor for a
swap (zswap) device. It sets up the zswap device and then uses the
memory_madvise program listed below to forcibly swap out and in a
specified number of pages, demonstrating both compress and decompress.

The zswap test expects the work queues for each IAA device on the
system to be configured properly as a kernel workqueue with a
workqueue driver_name of "crypto".

The first step is to make sure the iaa_crypto module is loaded::

    modprobe iaa_crypto

If the IAA devices and workqueues haven't previously been disabled and
reconfigured, then the default configuration should be in place and no
further IAA configuration is necessary. See :ref:`iaa_default_config`
below for details of the default configuration.

If the default configuration is in place, you should see the iaa
devices and wq0s enabled::

    # cat /sys/bus/dsa/devices/iax1/state
    enabled
    # cat /sys/bus/dsa/devices/iax1/wq1.0/state
    enabled

To demonstrate that the following steps work as expected, these
commands can be used to enable debug output::

    # echo -n 'module iaa_crypto +p' > /sys/kernel/debug/dynamic_debug/control
    # echo -n 'module idxd +p' > /sys/kernel/debug/dynamic_debug/control

Use the following commands to enable zswap::

    # echo 0 > /sys/module/zswap/parameters/enabled
    # echo 50 > /sys/module/zswap/parameters/max_pool_percent
    # echo deflate-iaa > /sys/module/zswap/parameters/compressor
    # echo zsmalloc > /sys/module/zswap/parameters/zpool
    # echo 1 > /sys/module/zswap/parameters/enabled
    # echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled
    # echo 100 > /proc/sys/vm/swappiness
    # echo never > /sys/kernel/mm/transparent_hugepage/enabled
    # echo 1 > /proc/sys/vm/overcommit_memory
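
Before running a workload, it can be worth dumping all zswap parameters
at once to confirm the settings took; a sketch::

    # grep -r . /sys/module/zswap/parameters/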
Now you can run the zswap workload you want to measure. For
example, using the memory_madvise code below, the following command
will swap in and out 100 pages::

    ./memory_madvise 100

    Allocating 100 pages to swap in/out
    Swapping out 100 pages
    Swapping in 100 pages
    Swapped out and in 100 pages

You should see something like the following in the dmesg output::

    [  404.202972] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, src_addr 223925c000, nr_sgs 1, req->src 00000000ee7cb5e6, req->slen 4096, sg_dma_len(sg) 4096
    [  404.202973] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, dst_addr 21dadf8000, nr_sgs 1, req->dst 000000008d6acea8, req->dlen 4096, sg_dma_len(sg) 8192
    [  404.202975] idxd 0000:e7:02.0: iaa_compress: desc->src1_addr 223925c000, desc->src1_size 4096, desc->dst_addr 21dadf8000, desc->max_dst_size 4096, desc->src2_addr 2203543000, desc->src2_size 1568
    [  404.202981] idxd 0000:e7:02.0: iaa_compress_verify: (verify) desc->src1_addr 21dadf8000, desc->src1_size 228, desc->dst_addr 223925c000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0
    ...

Now that basic functionality has been demonstrated, the defaults can
be erased and replaced with a different configuration. To do that,
first disable zswap::

    # echo lzo > /sys/module/zswap/parameters/compressor
    # swapoff -a
    # echo 0 > /sys/module/zswap/parameters/accept_threshold_percent
    # echo 0 > /sys/module/zswap/parameters/max_pool_percent
    # echo 0 > /sys/module/zswap/parameters/enabled

Then run the :ref:`iaa_disable_script` in the 'Use Cases' section
below to disable the default configuration.

Finally turn swap back on::

    # swapon -a

Following all that, the IAA device(s) can now be re-configured and
enabled as desired for further testing. Below is one example.

The zswap test expects the work queues for each IAA device on the
system to be configured properly as a kernel workqueue with a
workqueue driver_name of "crypto".

The below script automatically does that::

    #!/bin/bash

    echo "IAA devices:"
    lspci -d:0cfe
    echo "# IAA devices:"
    lspci -d:0cfe | wc -l

    #
    # count iaa instances
    #
    iaa_dev_id="0cfe"
    num_iaa=$(lspci -d:${iaa_dev_id} | wc -l)
    echo "Found ${num_iaa} IAA instances"

    #
    # disable iaa wqs and devices
    #
    echo "Disable IAA"

    for ((i = 1; i < ${num_iaa} * 2; i += 2)); do
        echo disable wq iax${i}/wq${i}.0
        accel-config disable-wq iax${i}/wq${i}.0
        echo disable iaa iax${i}
        accel-config disable-device iax${i}
    done

    echo "End Disable IAA"

    #
    # configure iaa wqs and devices
    #
    echo "Configure IAA"
    for ((i = 1; i < ${num_iaa} * 2; i += 2)); do
        accel-config config-wq --group-id=0 --mode=dedicated --size=128 --priority=10 --type=kernel --name="iaa_crypto" --driver_name="crypto" iax${i}/wq${i}.0
    done

    echo "End Configure IAA"

    #
    # enable iaa wqs and devices
    #
    echo "Enable IAA"

    for ((i = 1; i < ${num_iaa} * 2; i += 2)); do
        echo enable iaa iax${i}
        accel-config enable-device iax${i}
        echo enable wq iax${i}/wq${i}.0
        accel-config enable-wq iax${i}/wq${i}.0
    done

    echo "End Enable IAA"

When the workqueues are bound to the iaa_crypto driver, you should
see something similar to the following in the dmesg output if you've
enabled debug output (echo -n 'module iaa_crypto +p' >
/sys/kernel/debug/dynamic_debug/control)::

    [   60.752344] idxd 0000:f6:02.0: add_iaa_wq: added wq 000000004068d14d to iaa 00000000c9585ba2, n_wq 1
    [   60.752346] iaa_crypto: rebalance_wq_table: nr_nodes=2, nr_cpus 160, nr_iaa 8, cpus_per_iaa 20
    [   60.752347] iaa_crypto: rebalance_wq_table: iaa=0
    [   60.752349] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0)
    [   60.752350] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0)
    [   60.752352] iaa_crypto: rebalance_wq_table: assigned wq for cpu=0, node=0 = wq 00000000c8bb4452
    [   60.752354] iaa_crypto: rebalance_wq_table: iaa=0
    [   60.752355] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0)
    [   60.752356] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0)
    [   60.752358] iaa_crypto: rebalance_wq_table: assigned wq for cpu=1, node=0 = wq 00000000c8bb4452
    [   60.752359] iaa_crypto: rebalance_wq_table: iaa=0
    [   60.752360] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0)
    [   60.752361] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0)
    [   60.752362] iaa_crypto: rebalance_wq_table: assigned wq for cpu=2, node=0 = wq 00000000c8bb4452
    [   60.752364] iaa_crypto: rebalance_wq_table: iaa=0
    .
    .
    .

Once the workqueues and devices have been enabled, the IAA crypto
algorithms are enabled and available. When the IAA crypto algorithms
have been successfully enabled, you should see the following dmesg
output::

    [   64.893759] iaa_crypto: iaa_crypto_enable: iaa_crypto now ENABLED

Now run the following zswap-specific setup commands to have zswap use
the 'fixed' compression mode::

    echo 0 > /sys/module/zswap/parameters/enabled
    echo 50 > /sys/module/zswap/parameters/max_pool_percent
    echo deflate-iaa > /sys/module/zswap/parameters/compressor
    echo zsmalloc > /sys/module/zswap/parameters/zpool
    echo 1 > /sys/module/zswap/parameters/enabled
    echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled

    echo 100 > /proc/sys/vm/swappiness
    echo never > /sys/kernel/mm/transparent_hugepage/enabled
    echo 1 > /proc/sys/vm/overcommit_memory

Finally, you can now run the zswap workload you want to measure. For
example, using the code below, the following command will swap in and
out 100 pages::

    ./memory_madvise 100

    Allocating 100 pages to swap in/out
    Swapping out 100 pages
    Swapping in 100 pages
    Swapped out and in 100 pages

You should see something like the following in the dmesg output if
you've enabled debug output (echo -n 'module iaa_crypto +p' >
/sys/kernel/debug/dynamic_debug/control)::

    [  404.202972] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, src_addr 223925c000, nr_sgs 1, req->src 00000000ee7cb5e6, req->slen 4096, sg_dma_len(sg) 4096
    [  404.202973] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, dst_addr 21dadf8000, nr_sgs 1, req->dst 000000008d6acea8, req->dlen 4096, sg_dma_len(sg) 8192
    [  404.202975] idxd 0000:e7:02.0: iaa_compress: desc->src1_addr 223925c000, desc->src1_size 4096, desc->dst_addr 21dadf8000, desc->max_dst_size 4096, desc->src2_addr 2203543000, desc->src2_size 1568
    [  404.202981] idxd 0000:e7:02.0: iaa_compress_verify: (verify) desc->src1_addr 21dadf8000, desc->src1_size 228, desc->dst_addr 223925c000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0
    [  409.203227] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, src_addr 21ddd8b100, nr_sgs 1, req->src 0000000084adab64, req->slen 228, sg_dma_len(sg) 228
    [  409.203235] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, dst_addr 21ee3dc000, nr_sgs 1, req->dst 000000004e2990d0, req->dlen 4096, sg_dma_len(sg) 4096
    [  409.203239] idxd 0000:e7:02.0: iaa_decompress: desc->src1_addr 21ddd8b100, desc->src1_size 228, desc->dst_addr 21ee3dc000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0
    [  409.203254] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, src_addr 21ddd8b100, nr_sgs 1, req->src 0000000084adab64, req->slen 228, sg_dma_len(sg) 228
    [  409.203256] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, dst_addr 21f1551000, nr_sgs 1, req->dst 000000004e2990d0, req->dlen 4096, sg_dma_len(sg) 4096
    [  409.203257] idxd 0000:e7:02.0: iaa_decompress: desc->src1_addr 21ddd8b100, desc->src1_size 228, desc->dst_addr 21f1551000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0

In order to unregister the IAA crypto algorithms, and register new
ones using different parameters, any users of the current algorithm
should be stopped and the IAA workqueues and devices disabled.

In the case of zswap, remove the IAA crypto algorithm as the
compressor and turn off swap (to remove all references to
iaa_crypto)::

    echo lzo > /sys/module/zswap/parameters/compressor
    swapoff -a

    echo 0 > /sys/module/zswap/parameters/accept_threshold_percent
    echo 0 > /sys/module/zswap/parameters/max_pool_percent
    echo 0 > /sys/module/zswap/parameters/enabled

Once zswap is disabled and no longer using iaa_crypto, the IAA wqs and
devices can be disabled.

.. _iaa_disable_script:

IAA disable script
------------------

The below script automatically does that::

    #!/bin/bash

    echo "IAA devices:"
    lspci -d:0cfe
    echo "# IAA devices:"
    lspci -d:0cfe | wc -l

    #
    # count iaa instances
    #
    iaa_dev_id="0cfe"
    num_iaa=$(lspci -d:${iaa_dev_id} | wc -l)
    echo "Found ${num_iaa} IAA instances"

    #
    # disable iaa wqs and devices
    #
    echo "Disable IAA"

    for ((i = 1; i < ${num_iaa} * 2; i += 2)); do
        echo disable wq iax${i}/wq${i}.0
        accel-config disable-wq iax${i}/wq${i}.0
        echo disable iaa iax${i}
        accel-config disable-device iax${i}
    done

    echo "End Disable IAA"

Finally, at this point the iaa_crypto module can be removed, which
will unregister the current IAA crypto algorithms::

    rmmod iaa_crypto
memory_madvise.c (compile with gcc -o memory_madvise memory_madvise.c)::

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <linux/mman.h>

    #ifndef MADV_PAGEOUT
    #define MADV_PAGEOUT    21  /* force pages out immediately */
    #endif

    #define PG_SZ           4096

    int main(int argc, char **argv)
    {
        int i, nr_pages = 1;
        int64_t *dump_ptr;
        char *addr, *a;
        int loop = 1;

        if (argc > 1)
            nr_pages = atoi(argv[1]);

        printf("Allocating %d pages to swap in/out\n", nr_pages);

        /* allocate pages */
        addr = mmap(NULL, nr_pages * PG_SZ, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        *addr = 1;

        /* initialize data in page to all '*' chars */
        memset(addr, '*', nr_pages * PG_SZ);

        printf("Swapping out %d pages\n", nr_pages);

        /* Tell kernel to swap it out */
        madvise(addr, nr_pages * PG_SZ, MADV_PAGEOUT);

        while (loop > 0) {
            /* Wait for swap out to finish */
            sleep(5);

            a = addr;

            printf("Swapping in %d pages\n", nr_pages);

            /* Access the page ... this will swap it back in again */
            for (i = 0; i < nr_pages; i++) {
                if (a[0] != '*') {
                    printf("Bad data from decompress!!!!!\n");

                    dump_ptr = (int64_t *)a;
                    for (int j = 0; j < 100; j++) {
                        printf("  page %d data: %#llx\n", i, (unsigned long long)*dump_ptr);
                        dump_ptr++;
                    }
                }

                a += PG_SZ;
            }

            loop--;
        }

        printf("Swapped out and in %d pages\n", nr_pages);

        return 0;
    }

Appendix
========

.. _iaa_sysfs_config:

IAA sysfs config interface
--------------------------

Below is a description of the IAA sysfs interface, which, as mentioned
in the main document, should only be used if you know exactly what you
are doing. Even then, there's no compelling reason to use it directly
since accel-config can do everything the sysfs interface can, and in
fact accel-config is based on it under the covers.

The 'IAA config path' is /sys/bus/dsa/devices and contains
subdirectories representing each IAA device, workqueue, engine, and
group. Note that in the sysfs interface, the IAA devices are actually
named using iax e.g. iax1, iax3, etc. (Note that IAA devices are the
odd-numbered devices; the even-numbered devices are DSA devices and
can be ignored for IAA.)

The 'IAA device bind path' is /sys/bus/dsa/drivers/idxd/bind and is
the file that is written to enable an IAA device.

The 'IAA workqueue bind path' is /sys/bus/dsa/drivers/crypto/bind and
is the file that is written to enable an IAA workqueue.

Similarly, /sys/bus/dsa/drivers/idxd/unbind and
/sys/bus/dsa/drivers/crypto/unbind are used to disable IAA devices and
workqueues.

The basic sequence of commands needed to set up the IAA devices and
workqueues is:

For each device:

1) Disable any workqueues enabled on the device. For example, to
   disable workqueues 0 and 1 on IAA device 3::

     # echo wq3.0 > /sys/bus/dsa/drivers/crypto/unbind
     # echo wq3.1 > /sys/bus/dsa/drivers/crypto/unbind

2) Disable the device. For example, to disable IAA device 3::

     # echo iax3 > /sys/bus/dsa/drivers/idxd/unbind

3) Configure the desired workqueues. For example, to configure
   workqueue 3 on IAA device 3::

     # echo dedicated > /sys/bus/dsa/devices/iax3/wq3.3/mode
     # echo 128 > /sys/bus/dsa/devices/iax3/wq3.3/size
     # echo 0 > /sys/bus/dsa/devices/iax3/wq3.3/group_id
     # echo 10 > /sys/bus/dsa/devices/iax3/wq3.3/priority
     # echo "kernel" > /sys/bus/dsa/devices/iax3/wq3.3/type
     # echo "iaa_crypto" > /sys/bus/dsa/devices/iax3/wq3.3/name
     # echo "crypto" > /sys/bus/dsa/devices/iax3/wq3.3/driver_name

4) Enable the device. For example, to enable IAA device 3::

     # echo iax3 > /sys/bus/dsa/drivers/idxd/bind

5) Enable the desired workqueues on the device. For example, to
   enable workqueues 0 and 1 on IAA device 3::

     # echo wq3.0 > /sys/bus/dsa/drivers/crypto/bind
     # echo wq3.1 > /sys/bus/dsa/drivers/crypto/bind
Documentation/driver-api/crypto/iaa/index.rst (new file, 20 lines)
@@ -0,0 +1,20 @@

.. SPDX-License-Identifier: GPL-2.0

=================================
IAA (Intel Analytics Accelerator)
=================================

IAA provides hardware compression and decompression via the crypto
API.

.. toctree::
   :maxdepth: 1

   iaa-crypto

.. only:: subproject and html

   Indices
   =======

   * :ref:`genindex`
Documentation/driver-api/crypto/index.rst (new file, 20 lines)
@@ -0,0 +1,20 @@

.. SPDX-License-Identifier: GPL-2.0

==============
Crypto Drivers
==============

Documentation for crypto drivers that may need more involved setup and
configuration.

.. toctree::
   :maxdepth: 1

   iaa/index

.. only:: subproject and html

   Indices
   =======

   * :ref:`genindex`
@@ -116,6 +116,7 @@ available subsections can be seen below.
    wmi
    dpll
    wbrf
+   crypto/index

 .. only:: subproject and html
MAINTAINERS (15 lines changed)

@@ -5538,6 +5538,12 @@ F: include/crypto/
 F:	include/linux/crypto*
 F:	lib/crypto/

+CRYPTO SPEED TEST COMPARE
+M:	Wang Jinchao <wangjinchao@xfusion.com>
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	tools/crypto/tcrypt/tcrypt_speed_compare.py
+
 CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
 M:	Neil Horman <nhorman@tuxdriver.com>
 L:	linux-crypto@vger.kernel.org

@@ -9526,6 +9532,7 @@ F: Documentation/devicetree/bindings/gpio/hisilicon,ascend910-gpio.yaml
 F:	drivers/gpio/gpio-hisi.c

 HISILICON HIGH PERFORMANCE RSA ENGINE DRIVER (HPRE)
 M:	Zhiqi Song <songzhiqi1@huawei.com>
 M:	Longfang Liu <liulongfang@huawei.com>
 L:	linux-crypto@vger.kernel.org
 S:	Maintained

@@ -9628,7 +9635,6 @@ F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
 F:	drivers/scsi/hisi_sas/

 HISILICON SECURITY ENGINE V2 DRIVER (SEC2)
 M:	Kai Ye <yekai13@huawei.com>
 M:	Longfang Liu <liulongfang@huawei.com>
 L:	linux-crypto@vger.kernel.org
 S:	Maintained

@@ -10697,6 +10703,13 @@ S: Supported
 Q:	https://patchwork.kernel.org/project/linux-dmaengine/list/
 F:	drivers/dma/ioat*

+INTEL IAA CRYPTO DRIVER
+M:	Tom Zanussi <tom.zanussi@linux.intel.com>
+L:	linux-crypto@vger.kernel.org
+S:	Supported
+F:	Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+F:	drivers/crypto/intel/iaa/*
+
 INTEL IDLE DRIVER
 M:	Jacob Pan <jacob.jun.pan@linux.intel.com>
 M:	Len Brown <lenb@kernel.org>
@@ -231,7 +231,7 @@ config CRYPTO_SM4_ARM64_CE
	  - NEON (Advanced SIMD) extensions

config CRYPTO_SM4_ARM64_CE_BLK
	tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR/XTS (ARMv8 Crypto Extensions)"
	tristate "Ciphers: SM4, modes: ECB/CBC/CTR/XTS (ARMv8 Crypto Extensions)"
	depends on KERNEL_MODE_NEON
	select CRYPTO_SKCIPHER
	select CRYPTO_SM4
@@ -240,7 +240,6 @@ config CRYPTO_SM4_ARM64_CE_BLK
	  with block cipher modes:
	  - ECB (Electronic Codebook) mode (NIST SP800-38A)
	  - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
	  - CFB (Cipher Feedback) mode (NIST SP800-38A)
	  - CTR (Counter) mode (NIST SP800-38A)
	  - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
	    and IEEE 1619)
@@ -250,7 +249,7 @@ config CRYPTO_SM4_ARM64_CE_BLK
	  - NEON (Advanced SIMD) extensions

config CRYPTO_SM4_ARM64_NEON_BLK
	tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR (NEON)"
	tristate "Ciphers: SM4, modes: ECB/CBC/CTR (NEON)"
	depends on KERNEL_MODE_NEON
	select CRYPTO_SKCIPHER
	select CRYPTO_SM4
@@ -259,7 +258,6 @@ config CRYPTO_SM4_ARM64_NEON_BLK
	  with block cipher modes:
	  - ECB (Electronic Codebook) mode (NIST SP800-38A)
	  - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
	  - CFB (Cipher Feedback) mode (NIST SP800-38A)
	  - CTR (Counter) mode (NIST SP800-38A)

	  Architecture: arm64 using:
@@ -402,164 +402,6 @@ SYM_FUNC_START(sm4_ce_cbc_cts_dec)
	ret
SYM_FUNC_END(sm4_ce_cbc_cts_dec)

.align 3
SYM_FUNC_START(sm4_ce_cfb_enc)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst
	 *   x2: src
	 *   x3: iv (big endian, 128 bit)
	 *   w4: nblocks
	 */
	SM4_PREPARE(x0)

	ld1	{RIV.16b}, [x3]

.Lcfb_enc_loop_4x:
	cmp	w4, #4
	blt	.Lcfb_enc_loop_1x

	sub	w4, w4, #4

	ld1	{v0.16b-v3.16b}, [x2], #64

	rev32	v8.16b, RIV.16b
	SM4_CRYPT_BLK_BE(v8)
	eor	v0.16b, v0.16b, v8.16b

	rev32	v8.16b, v0.16b
	SM4_CRYPT_BLK_BE(v8)
	eor	v1.16b, v1.16b, v8.16b

	rev32	v8.16b, v1.16b
	SM4_CRYPT_BLK_BE(v8)
	eor	v2.16b, v2.16b, v8.16b

	rev32	v8.16b, v2.16b
	SM4_CRYPT_BLK_BE(v8)
	eor	v3.16b, v3.16b, v8.16b

	st1	{v0.16b-v3.16b}, [x1], #64
	mov	RIV.16b, v3.16b

	cbz	w4, .Lcfb_enc_end
	b	.Lcfb_enc_loop_4x

.Lcfb_enc_loop_1x:
	sub	w4, w4, #1

	ld1	{v0.16b}, [x2], #16

	SM4_CRYPT_BLK(RIV)
	eor	RIV.16b, RIV.16b, v0.16b

	st1	{RIV.16b}, [x1], #16

	cbnz	w4, .Lcfb_enc_loop_1x

.Lcfb_enc_end:
	/* store new IV */
	st1	{RIV.16b}, [x3]

	ret
SYM_FUNC_END(sm4_ce_cfb_enc)

.align 3
SYM_FUNC_START(sm4_ce_cfb_dec)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst
	 *   x2: src
	 *   x3: iv (big endian, 128 bit)
	 *   w4: nblocks
	 */
	SM4_PREPARE(x0)

	ld1	{RIV.16b}, [x3]

.Lcfb_dec_loop_8x:
	sub	w4, w4, #8
	tbnz	w4, #31, .Lcfb_dec_4x

	ld1	{v0.16b-v3.16b}, [x2], #64
	ld1	{v4.16b-v7.16b}, [x2], #64

	rev32	v8.16b, RIV.16b
	rev32	v9.16b, v0.16b
	rev32	v10.16b, v1.16b
	rev32	v11.16b, v2.16b
	rev32	v12.16b, v3.16b
	rev32	v13.16b, v4.16b
	rev32	v14.16b, v5.16b
	rev32	v15.16b, v6.16b

	SM4_CRYPT_BLK8_BE(v8, v9, v10, v11, v12, v13, v14, v15)

	mov	RIV.16b, v7.16b

	eor	v0.16b, v0.16b, v8.16b
	eor	v1.16b, v1.16b, v9.16b
	eor	v2.16b, v2.16b, v10.16b
	eor	v3.16b, v3.16b, v11.16b
	eor	v4.16b, v4.16b, v12.16b
	eor	v5.16b, v5.16b, v13.16b
	eor	v6.16b, v6.16b, v14.16b
	eor	v7.16b, v7.16b, v15.16b

	st1	{v0.16b-v3.16b}, [x1], #64
	st1	{v4.16b-v7.16b}, [x1], #64

	cbz	w4, .Lcfb_dec_end
	b	.Lcfb_dec_loop_8x

.Lcfb_dec_4x:
	add	w4, w4, #8
	cmp	w4, #4
	blt	.Lcfb_dec_loop_1x

	sub	w4, w4, #4

	ld1	{v0.16b-v3.16b}, [x2], #64

	rev32	v8.16b, RIV.16b
	rev32	v9.16b, v0.16b
	rev32	v10.16b, v1.16b
	rev32	v11.16b, v2.16b

	SM4_CRYPT_BLK4_BE(v8, v9, v10, v11)

	mov	RIV.16b, v3.16b

	eor	v0.16b, v0.16b, v8.16b
	eor	v1.16b, v1.16b, v9.16b
	eor	v2.16b, v2.16b, v10.16b
	eor	v3.16b, v3.16b, v11.16b

	st1	{v0.16b-v3.16b}, [x1], #64

	cbz	w4, .Lcfb_dec_end

.Lcfb_dec_loop_1x:
	sub	w4, w4, #1

	ld1	{v0.16b}, [x2], #16

	SM4_CRYPT_BLK(RIV)

	eor	RIV.16b, RIV.16b, v0.16b
	st1	{RIV.16b}, [x1], #16

	mov	RIV.16b, v0.16b

	cbnz	w4, .Lcfb_dec_loop_1x

.Lcfb_dec_end:
	/* store new IV */
	st1	{RIV.16b}, [x3]

	ret
SYM_FUNC_END(sm4_ce_cfb_dec)

.align 3
SYM_FUNC_START(sm4_ce_ctr_enc)
	/* input:
@@ -37,10 +37,6 @@ asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
				   u8 *iv, unsigned int nbytes);
asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
				   u8 *iv, unsigned int nbytes);
asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src,
			       u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src,
			       u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
			       u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_xts_enc(const u32 *rkey1, u8 *dst, const u8 *src,
@@ -56,7 +52,6 @@ asmlinkage void sm4_ce_mac_update(const u32 *rkey_enc, u8 *digest,
EXPORT_SYMBOL(sm4_ce_expand_key);
EXPORT_SYMBOL(sm4_ce_crypt_block);
EXPORT_SYMBOL(sm4_ce_cbc_enc);
EXPORT_SYMBOL(sm4_ce_cfb_enc);

struct sm4_xts_ctx {
	struct sm4_ctx key1;
@@ -280,90 +275,6 @@ static int sm4_cbc_cts_decrypt(struct skcipher_request *req)
	return sm4_cbc_cts_crypt(req, false);
}

static int sm4_cfb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int nblks;

		kernel_neon_begin();

		nblks = BYTES2BLKS(nbytes);
		if (nblks) {
			sm4_ce_cfb_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
			dst += nblks * SM4_BLOCK_SIZE;
			src += nblks * SM4_BLOCK_SIZE;
			nbytes -= nblks * SM4_BLOCK_SIZE;
		}

		/* tail */
		if (walk.nbytes == walk.total && nbytes > 0) {
			u8 keystream[SM4_BLOCK_SIZE];

			sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
			crypto_xor_cpy(dst, src, keystream, nbytes);
			nbytes = 0;
		}

		kernel_neon_end();

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int sm4_cfb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int nblks;

		kernel_neon_begin();

		nblks = BYTES2BLKS(nbytes);
		if (nblks) {
			sm4_ce_cfb_dec(ctx->rkey_enc, dst, src, walk.iv, nblks);
			dst += nblks * SM4_BLOCK_SIZE;
			src += nblks * SM4_BLOCK_SIZE;
			nbytes -= nblks * SM4_BLOCK_SIZE;
		}

		/* tail */
		if (walk.nbytes == walk.total && nbytes > 0) {
			u8 keystream[SM4_BLOCK_SIZE];

			sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
			crypto_xor_cpy(dst, src, keystream, nbytes);
			nbytes = 0;
		}

		kernel_neon_end();

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int sm4_ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -542,22 +453,6 @@ static struct skcipher_alg sm4_algs[] = {
	.setkey		= sm4_setkey,
	.encrypt	= sm4_cbc_encrypt,
	.decrypt	= sm4_cbc_decrypt,
}, {
	.base = {
		.cra_name		= "cfb(sm4)",
		.cra_driver_name	= "cfb-sm4-ce",
		.cra_priority		= 400,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct sm4_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= SM4_KEY_SIZE,
	.max_keysize	= SM4_KEY_SIZE,
	.ivsize		= SM4_BLOCK_SIZE,
	.chunksize	= SM4_BLOCK_SIZE,
	.setkey		= sm4_setkey,
	.encrypt	= sm4_cfb_encrypt,
	.decrypt	= sm4_cfb_decrypt,
}, {
	.base = {
		.cra_name		= "ctr(sm4)",
@@ -869,12 +764,11 @@ static void __exit sm4_exit(void)
module_cpu_feature_match(SM4, sm4_init);
module_exit(sm4_exit);

MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_DESCRIPTION("SM4 ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_ALIAS_CRYPTO("sm4-ce");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("ecb(sm4)");
MODULE_ALIAS_CRYPTO("cbc(sm4)");
MODULE_ALIAS_CRYPTO("cfb(sm4)");
MODULE_ALIAS_CRYPTO("ctr(sm4)");
MODULE_ALIAS_CRYPTO("cts(cbc(sm4))");
MODULE_ALIAS_CRYPTO("xts(sm4)");
@@ -11,6 +11,3 @@ void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);

void sm4_ce_cbc_enc(const u32 *rkey_enc, u8 *dst, const u8 *src,
		    u8 *iv, unsigned int nblocks);

void sm4_ce_cfb_enc(const u32 *rkey_enc, u8 *dst, const u8 *src,
		    u8 *iv, unsigned int nblocks);
@@ -437,119 +437,6 @@ SYM_FUNC_START(sm4_neon_cbc_dec)
	ret
SYM_FUNC_END(sm4_neon_cbc_dec)

.align 3
SYM_FUNC_START(sm4_neon_cfb_dec)
	/* input:
	 *   x0: round key array, CTX
	 *   x1: dst
	 *   x2: src
	 *   x3: iv (big endian, 128 bit)
	 *   w4: nblocks
	 */
	SM4_PREPARE()

	ld1	{v0.16b}, [x3]

.Lcfb_dec_loop_8x:
	sub	w4, w4, #8
	tbnz	w4, #31, .Lcfb_dec_4x

	ld1	{v1.16b-v3.16b}, [x2], #48
	ld4	{v4.4s-v7.4s}, [x2]

	transpose_4x4(v0, v1, v2, v3)

	SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7)

	sub	x2, x2, #48
	ld1	{RTMP0.16b-RTMP3.16b}, [x2], #64
	ld1	{RTMP4.16b-RTMP7.16b}, [x2], #64

	eor	v0.16b, v0.16b, RTMP0.16b
	eor	v1.16b, v1.16b, RTMP1.16b
	eor	v2.16b, v2.16b, RTMP2.16b
	eor	v3.16b, v3.16b, RTMP3.16b
	eor	v4.16b, v4.16b, RTMP4.16b
	eor	v5.16b, v5.16b, RTMP5.16b
	eor	v6.16b, v6.16b, RTMP6.16b
	eor	v7.16b, v7.16b, RTMP7.16b

	st1	{v0.16b-v3.16b}, [x1], #64
	st1	{v4.16b-v7.16b}, [x1], #64

	mov	v0.16b, RTMP7.16b

	cbz	w4, .Lcfb_dec_end
	b	.Lcfb_dec_loop_8x

.Lcfb_dec_4x:
	add	w4, w4, #8
	cmp	w4, #4
	blt	.Lcfb_dec_tail

	sub	w4, w4, #4

	ld1	{v4.16b-v7.16b}, [x2], #64

	rev32	v0.16b, v0.16b		/* v0 is IV register */
	rev32	v1.16b, v4.16b
	rev32	v2.16b, v5.16b
	rev32	v3.16b, v6.16b

	transpose_4x4(v0, v1, v2, v3)

	SM4_CRYPT_BLK4_BE(v0, v1, v2, v3)

	eor	v0.16b, v0.16b, v4.16b
	eor	v1.16b, v1.16b, v5.16b
	eor	v2.16b, v2.16b, v6.16b
	eor	v3.16b, v3.16b, v7.16b

	st1	{v0.16b-v3.16b}, [x1], #64

	mov	v0.16b, v7.16b

	cbz	w4, .Lcfb_dec_end

.Lcfb_dec_tail:
	cmp	w4, #2
	ld1	{v4.16b}, [x2], #16
	blt	.Lcfb_dec_tail_load_done
	ld1	{v5.16b}, [x2], #16
	beq	.Lcfb_dec_tail_load_done
	ld1	{v6.16b}, [x2], #16

.Lcfb_dec_tail_load_done:
	rev32	v0.16b, v0.16b		/* v0 is IV register */
	rev32	v1.16b, v4.16b
	rev32	v2.16b, v5.16b

	transpose_4x4(v0, v1, v2, v3)

	SM4_CRYPT_BLK4_BE(v0, v1, v2, v3)

	cmp	w4, #2
	eor	v0.16b, v0.16b, v4.16b
	st1	{v0.16b}, [x1], #16
	mov	v0.16b, v4.16b
	blt	.Lcfb_dec_end

	eor	v1.16b, v1.16b, v5.16b
	st1	{v1.16b}, [x1], #16
	mov	v0.16b, v5.16b
	beq	.Lcfb_dec_end

	eor	v2.16b, v2.16b, v6.16b
	st1	{v2.16b}, [x1], #16
	mov	v0.16b, v6.16b

.Lcfb_dec_end:
	/* store new IV */
	st1	{v0.16b}, [x3]

	ret
SYM_FUNC_END(sm4_neon_cfb_dec)

.align 3
SYM_FUNC_START(sm4_neon_ctr_crypt)
	/* input:
@@ -22,8 +22,6 @@ asmlinkage void sm4_neon_crypt(const u32 *rkey, u8 *dst, const u8 *src,
			       unsigned int nblocks);
asmlinkage void sm4_neon_cbc_dec(const u32 *rkey_dec, u8 *dst, const u8 *src,
				 u8 *iv, unsigned int nblocks);
asmlinkage void sm4_neon_cfb_dec(const u32 *rkey_enc, u8 *dst, const u8 *src,
				 u8 *iv, unsigned int nblocks);
asmlinkage void sm4_neon_ctr_crypt(const u32 *rkey_enc, u8 *dst, const u8 *src,
				   u8 *iv, unsigned int nblocks);

@@ -142,90 +140,6 @@ static int sm4_cbc_decrypt(struct skcipher_request *req)
	return err;
}

static int sm4_cfb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		u8 keystream[SM4_BLOCK_SIZE];
		const u8 *iv = walk.iv;
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		while (nbytes >= SM4_BLOCK_SIZE) {
			sm4_crypt_block(ctx->rkey_enc, keystream, iv);
			crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE);
			iv = dst;
			src += SM4_BLOCK_SIZE;
			dst += SM4_BLOCK_SIZE;
			nbytes -= SM4_BLOCK_SIZE;
		}
		if (iv != walk.iv)
			memcpy(walk.iv, iv, SM4_BLOCK_SIZE);

		/* tail */
		if (walk.nbytes == walk.total && nbytes > 0) {
			sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
			crypto_xor_cpy(dst, src, keystream, nbytes);
			nbytes = 0;
		}

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int sm4_cfb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int nblocks;

		nblocks = nbytes / SM4_BLOCK_SIZE;
		if (nblocks) {
			kernel_neon_begin();

			sm4_neon_cfb_dec(ctx->rkey_enc, dst, src,
					 walk.iv, nblocks);

			kernel_neon_end();

			dst += nblocks * SM4_BLOCK_SIZE;
			src += nblocks * SM4_BLOCK_SIZE;
			nbytes -= nblocks * SM4_BLOCK_SIZE;
		}

		/* tail */
		if (walk.nbytes == walk.total && nbytes > 0) {
			u8 keystream[SM4_BLOCK_SIZE];

			sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
			crypto_xor_cpy(dst, src, keystream, nbytes);
			nbytes = 0;
		}

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int sm4_ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -301,22 +215,6 @@ static struct skcipher_alg sm4_algs[] = {
	.setkey		= sm4_setkey,
	.encrypt	= sm4_cbc_encrypt,
	.decrypt	= sm4_cbc_decrypt,
}, {
	.base = {
		.cra_name		= "cfb(sm4)",
		.cra_driver_name	= "cfb-sm4-neon",
		.cra_priority		= 200,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct sm4_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= SM4_KEY_SIZE,
	.max_keysize	= SM4_KEY_SIZE,
	.ivsize		= SM4_BLOCK_SIZE,
	.chunksize	= SM4_BLOCK_SIZE,
	.setkey		= sm4_setkey,
	.encrypt	= sm4_cfb_encrypt,
	.decrypt	= sm4_cfb_decrypt,
}, {
	.base = {
		.cra_name		= "ctr(sm4)",
@@ -349,12 +247,11 @@ static void __exit sm4_exit(void)
module_init(sm4_init);
module_exit(sm4_exit);

MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 NEON");
MODULE_DESCRIPTION("SM4 ECB/CBC/CTR using ARMv8 NEON");
MODULE_ALIAS_CRYPTO("sm4-neon");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("ecb(sm4)");
MODULE_ALIAS_CRYPTO("cbc(sm4)");
MODULE_ALIAS_CRYPTO("cfb(sm4)");
MODULE_ALIAS_CRYPTO("ctr(sm4)");
MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
MODULE_LICENSE("GPL v2");
@@ -37,7 +37,7 @@ asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
			      unsigned char *aad, unsigned int alen);
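The commit log for this hunk is not shown here, but dropping the array
bounds from the prototype presumably avoids a declaration mismatch that
modern compilers flag. A hypothetical illustration (ours, not from the
patch):

/* With -Warray-parameter, GCC warns when redeclarations disagree on
 * parameter array bounds, e.g.: */
void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
/* -> "argument 1 of type 'unsigned char[]' with mismatched bound" */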
@@ -597,7 +597,9 @@ static int ctr_aes_crypt(struct skcipher_request *req)
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
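The fix matters because the CPACF instruction processes whole blocks:
passing walk.src.virt.addr directly could read AES_BLOCK_SIZE bytes when
fewer remain. A minimal sketch of the corrected pattern, with the CPACF
call abstracted behind a callback (the typedef and function name here
are ours, not kernel API):

#include <crypto/aes.h>
#include <crypto/algapi.h>	/* crypto_inc() */
#include <linux/string.h>
#include <linux/types.h>

typedef void (*ctr_block_fn)(const void *key, u8 *dst, const u8 *src,
			     u8 *iv);

/* Encrypt a final partial CTR block via a zero-padded bounce buffer. */
static void ctr_final_partial(const void *key, u8 *dst, const u8 *src,
			      unsigned int nbytes, u8 *iv, ctr_block_fn fn)
{
	u8 buf[AES_BLOCK_SIZE];

	memset(buf, 0, AES_BLOCK_SIZE);
	memcpy(buf, src, nbytes);	/* read only the bytes that exist */
	fn(key, buf, buf, iv);		/* cpacf_kmctr() plays this role */
	memcpy(dst, buf, nbytes);	/* write back only nbytes */
	crypto_inc(iv, AES_BLOCK_SIZE);
}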
@@ -693,9 +693,11 @@ static int ctr_paes_crypt(struct skcipher_request *req)
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		while (1) {
			if (cpacf_kmctr(ctx->fc, &param, buf,
					walk.src.virt.addr, AES_BLOCK_SIZE,
					buf, AES_BLOCK_SIZE,
					walk.iv) == AES_BLOCK_SIZE)
				break;
			if (__paes_convert_key(ctx))
@@ -189,7 +189,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
	  Processes 16 blocks in parallel.

config CRYPTO_SM4_AESNI_AVX_X86_64
	tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX)"
	tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX)"
	depends on X86 && 64BIT
	select CRYPTO_SKCIPHER
	select CRYPTO_SIMD
@@ -197,7 +197,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64
	select CRYPTO_SM4
	help
	  Length-preserving ciphers: SM4 cipher algorithms
	  (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes
	  (OSCCA GB/T 32907-2016) with ECB, CBC, and CTR modes

	  Architecture: x86_64 using:
	  - AES-NI (AES New Instructions)
@@ -210,7 +210,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64
	  If unsure, say N.

config CRYPTO_SM4_AESNI_AVX2_X86_64
	tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX2)"
	tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX2)"
	depends on X86 && 64BIT
	select CRYPTO_SKCIPHER
	select CRYPTO_SIMD
@@ -219,7 +219,7 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64
	select CRYPTO_SM4_AESNI_AVX_X86_64
	help
	  Length-preserving ciphers: SM4 cipher algorithms
	  (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes
	  (OSCCA GB/T 32907-2016) with ECB, CBC, and CTR modes

	  Architecture: x86_64 using:
	  - AES-NI (AES New Instructions)
@@ -2,8 +2,8 @@
/*
 * Cryptographic API.
 *
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * Supplemental SSE3 instructions.
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementations
 * using SSSE3, AVX, AVX2, and SHA-NI instructions.
 *
 * This file is based on sha1_generic.c
 *
@@ -28,6 +28,9 @@
#include <asm/simd.h>

static const struct x86_cpu_id module_cpu_ids[] = {
#ifdef CONFIG_AS_SHA1_NI
	X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
#endif
	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
@@ -1,8 +1,8 @@
/*
 * Cryptographic API.
 *
 * Glue code for the SHA256 Secure Hash Algorithm assembler
 * implementation using supplemental SSE3 / AVX / AVX2 instructions.
 * Glue code for the SHA256 Secure Hash Algorithm assembler implementations
 * using SSSE3, AVX, AVX2, and SHA-NI instructions.
 *
 * This file is based on sha256_generic.c
 *
@@ -45,6 +45,9 @@ asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
				       const u8 *data, int blocks);

static const struct x86_cpu_id module_cpu_ids[] = {
#ifdef CONFIG_AS_SHA256_NI
	X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
#endif
	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
|
||||
FRAME_END
|
||||
RET;
|
||||
SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
|
||||
|
||||
/*
|
||||
* void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
|
||||
* const u8 *src, u8 *iv)
|
||||
*/
|
||||
SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
|
||||
/* input:
|
||||
* %rdi: round key array, CTX
|
||||
* %rsi: dst (8 blocks)
|
||||
* %rdx: src (8 blocks)
|
||||
* %rcx: iv
|
||||
*/
|
||||
FRAME_BEGIN
|
||||
|
||||
/* Load input */
|
||||
vmovdqu (%rcx), RA0;
|
||||
vmovdqu 0 * 16(%rdx), RA1;
|
||||
vmovdqu 1 * 16(%rdx), RA2;
|
||||
vmovdqu 2 * 16(%rdx), RA3;
|
||||
vmovdqu 3 * 16(%rdx), RB0;
|
||||
vmovdqu 4 * 16(%rdx), RB1;
|
||||
vmovdqu 5 * 16(%rdx), RB2;
|
||||
vmovdqu 6 * 16(%rdx), RB3;
|
||||
|
||||
/* Update IV */
|
||||
vmovdqu 7 * 16(%rdx), RNOT;
|
||||
vmovdqu RNOT, (%rcx);
|
||||
|
||||
call __sm4_crypt_blk8;
|
||||
|
||||
vpxor (0 * 16)(%rdx), RA0, RA0;
|
||||
vpxor (1 * 16)(%rdx), RA1, RA1;
|
||||
vpxor (2 * 16)(%rdx), RA2, RA2;
|
||||
vpxor (3 * 16)(%rdx), RA3, RA3;
|
||||
vpxor (4 * 16)(%rdx), RB0, RB0;
|
||||
vpxor (5 * 16)(%rdx), RB1, RB1;
|
||||
vpxor (6 * 16)(%rdx), RB2, RB2;
|
||||
vpxor (7 * 16)(%rdx), RB3, RB3;
|
||||
|
||||
vmovdqu RA0, (0 * 16)(%rsi);
|
||||
vmovdqu RA1, (1 * 16)(%rsi);
|
||||
vmovdqu RA2, (2 * 16)(%rsi);
|
||||
vmovdqu RA3, (3 * 16)(%rsi);
|
||||
vmovdqu RB0, (4 * 16)(%rsi);
|
||||
vmovdqu RB1, (5 * 16)(%rsi);
|
||||
vmovdqu RB2, (6 * 16)(%rsi);
|
||||
vmovdqu RB3, (7 * 16)(%rsi);
|
||||
|
||||
vzeroall;
|
||||
FRAME_END
|
||||
RET;
|
||||
SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)
|
||||
|
@@ -439,58 +439,3 @@ SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)

/*
 * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN

	vzeroupper;

	/* Load input */
	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RA0;
	vmovdqu (0 * 32 + 16)(%rdx), RA1;
	vmovdqu (1 * 32 + 16)(%rdx), RA2;
	vmovdqu (2 * 32 + 16)(%rdx), RA3;
	vmovdqu (3 * 32 + 16)(%rdx), RB0;
	vmovdqu (4 * 32 + 16)(%rdx), RB1;
	vmovdqu (5 * 32 + 16)(%rdx), RB2;
	vmovdqu (6 * 32 + 16)(%rdx), RB3;

	/* Update IV */
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
@@ -14,10 +14,6 @@ int sm4_cbc_encrypt(struct skcipher_request *req);
int sm4_avx_cbc_decrypt(struct skcipher_request *req,
			unsigned int bsize, sm4_crypt_func func);

int sm4_cfb_encrypt(struct skcipher_request *req);
int sm4_avx_cfb_decrypt(struct skcipher_request *req,
			unsigned int bsize, sm4_crypt_func func);

int sm4_avx_ctr_crypt(struct skcipher_request *req,
		      unsigned int bsize, sm4_crypt_func func);
@@ -23,8 +23,6 @@ asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
					     const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
					     const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
					     const u8 *src, u8 *iv);

static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int key_len)
@@ -41,12 +39,6 @@ static int cbc_decrypt(struct skcipher_request *req)
}

static int cfb_decrypt(struct skcipher_request *req)
{
	return sm4_avx_cfb_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
				   sm4_aesni_avx2_cfb_dec_blk16);
}

static int ctr_crypt(struct skcipher_request *req)
{
	return sm4_avx_ctr_crypt(req, SM4_CRYPT16_BLOCK_SIZE,
@@ -87,24 +79,6 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
	.setkey		= sm4_skcipher_setkey,
	.encrypt	= sm4_cbc_encrypt,
	.decrypt	= cbc_decrypt,
}, {
	.base = {
		.cra_name		= "__cfb(sm4)",
		.cra_driver_name	= "__cfb-sm4-aesni-avx2",
		.cra_priority		= 500,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct sm4_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= SM4_KEY_SIZE,
	.max_keysize	= SM4_KEY_SIZE,
	.ivsize		= SM4_BLOCK_SIZE,
	.chunksize	= SM4_BLOCK_SIZE,
	.walksize	= 16 * SM4_BLOCK_SIZE,
	.setkey		= sm4_skcipher_setkey,
	.encrypt	= sm4_cfb_encrypt,
	.decrypt	= cfb_decrypt,
}, {
	.base = {
		.cra_name		= "__ctr(sm4)",
@@ -27,8 +27,6 @@ asmlinkage void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
					   const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
					   const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
					   const u8 *src, u8 *iv);

static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int key_len)
@@ -188,116 +186,6 @@ static int cbc_decrypt(struct skcipher_request *req)
				   sm4_aesni_avx_cbc_dec_blk8);
}

int sm4_cfb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		u8 keystream[SM4_BLOCK_SIZE];
		const u8 *iv = walk.iv;
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		while (nbytes >= SM4_BLOCK_SIZE) {
			sm4_crypt_block(ctx->rkey_enc, keystream, iv);
			crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE);
			iv = dst;
			src += SM4_BLOCK_SIZE;
			dst += SM4_BLOCK_SIZE;
			nbytes -= SM4_BLOCK_SIZE;
		}
		if (iv != walk.iv)
			memcpy(walk.iv, iv, SM4_BLOCK_SIZE);

		/* tail */
		if (walk.nbytes == walk.total && nbytes > 0) {
			sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
			crypto_xor_cpy(dst, src, keystream, nbytes);
			nbytes = 0;
		}

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(sm4_cfb_encrypt);

int sm4_avx_cfb_decrypt(struct skcipher_request *req,
			unsigned int bsize, sm4_crypt_func func)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		kernel_fpu_begin();

		while (nbytes >= bsize) {
			func(ctx->rkey_enc, dst, src, walk.iv);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		}

		while (nbytes >= SM4_BLOCK_SIZE) {
			u8 keystream[SM4_BLOCK_SIZE * 8];
			unsigned int nblocks = min(nbytes >> 4, 8u);

			memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
			if (nblocks > 1)
				memcpy(&keystream[SM4_BLOCK_SIZE], src,
				       (nblocks - 1) * SM4_BLOCK_SIZE);
			memcpy(walk.iv, src + (nblocks - 1) * SM4_BLOCK_SIZE,
			       SM4_BLOCK_SIZE);

			sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
					     keystream, nblocks);

			crypto_xor_cpy(dst, src, keystream,
				       nblocks * SM4_BLOCK_SIZE);
			dst += nblocks * SM4_BLOCK_SIZE;
			src += nblocks * SM4_BLOCK_SIZE;
			nbytes -= nblocks * SM4_BLOCK_SIZE;
		}

		kernel_fpu_end();

		/* tail */
		if (walk.nbytes == walk.total && nbytes > 0) {
			u8 keystream[SM4_BLOCK_SIZE];

			sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
			crypto_xor_cpy(dst, src, keystream, nbytes);
			nbytes = 0;
		}

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(sm4_avx_cfb_decrypt);

static int cfb_decrypt(struct skcipher_request *req)
{
	return sm4_avx_cfb_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
				   sm4_aesni_avx_cfb_dec_blk8);
}

int sm4_avx_ctr_crypt(struct skcipher_request *req,
		      unsigned int bsize, sm4_crypt_func func)
{
@@ -406,24 +294,6 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
	.setkey		= sm4_skcipher_setkey,
	.encrypt	= sm4_cbc_encrypt,
	.decrypt	= cbc_decrypt,
}, {
	.base = {
		.cra_name		= "__cfb(sm4)",
		.cra_driver_name	= "__cfb-sm4-aesni-avx",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct sm4_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= SM4_KEY_SIZE,
	.max_keysize	= SM4_KEY_SIZE,
	.ivsize		= SM4_BLOCK_SIZE,
	.chunksize	= SM4_BLOCK_SIZE,
	.walksize	= 8 * SM4_BLOCK_SIZE,
	.setkey		= sm4_skcipher_setkey,
	.encrypt	= sm4_cfb_encrypt,
	.decrypt	= cfb_decrypt,
}, {
	.base = {
		.cra_name		= "__ctr(sm4)",
@@ -661,15 +661,6 @@ config CRYPTO_CBC

	  This block cipher mode is required for IPSec ESP (XFRM_ESP).

config CRYPTO_CFB
	tristate "CFB (Cipher Feedback)"
	select CRYPTO_SKCIPHER
	select CRYPTO_MANAGER
	help
	  CFB (Cipher Feedback) mode (NIST SP800-38A)

	  This block cipher mode is required for TPM2 Cryptography.

config CRYPTO_CTR
	tristate "CTR (Counter)"
	select CRYPTO_SKCIPHER
@@ -735,20 +726,6 @@ config CRYPTO_LRW

	  See https://people.csail.mit.edu/rivest/pubs/LRW02.pdf

config CRYPTO_OFB
	tristate "OFB (Output Feedback)"
	select CRYPTO_SKCIPHER
	select CRYPTO_MANAGER
	help
	  OFB (Output Feedback) mode (NIST SP800-38A)

	  This mode makes a block cipher into a synchronous
	  stream cipher. It generates keystream blocks, which are then XORed
	  with the plaintext blocks to get the ciphertext. Flipping a bit in the
	  ciphertext produces a flipped bit in the plaintext at the same
	  location. This property allows many error correcting codes to function
	  normally even when applied before encryption.

config CRYPTO_PCBC
	tristate "PCBC (Propagating Cipher Block Chaining)"
	select CRYPTO_SKCIPHER
@@ -92,7 +92,6 @@ obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
CFLAGS_blake2b_generic.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
obj-$(CONFIG_CRYPTO_CFB) += cfb.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_CTS) += cts.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
@@ -186,7 +185,6 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
obj-$(CONFIG_CRYPTO_OFB) += ofb.o
obj-$(CONFIG_CRYPTO_ECC) += ecc.o
obj-$(CONFIG_CRYPTO_ESSIV) += essiv.o
obj-$(CONFIG_CRYPTO_CURVE25519) += curve25519-generic.o
@@ -1116,9 +1116,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg);
void af_alg_free_resources(struct af_alg_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct af_alg_ctx *ctx;

	af_alg_free_areq_sgls(areq);
	sock_kfree_s(sk, areq, areq->areqlen);

	ctx = alg_sk(sk)->private;
	ctx->inflight = false;
}
EXPORT_SYMBOL_GPL(af_alg_free_resources);

@@ -1188,11 +1192,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
					   unsigned int areqlen)
{
	struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
	struct af_alg_ctx *ctx = alg_sk(sk)->private;
	struct af_alg_async_req *areq;

	/* Only one AIO request can be in flight. */
	if (ctx->inflight)
		return ERR_PTR(-EBUSY);

	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
	if (unlikely(!areq))
		return ERR_PTR(-ENOMEM);

	ctx->inflight = true;

	areq->areqlen = areqlen;
	areq->sk = sk;
	areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
@@ -341,6 +341,7 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
	}

	if (!strcmp(q->cra_driver_name, alg->cra_name) ||
	    !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
	    !strcmp(q->cra_name, alg->cra_driver_name))
		goto err;
}
@@ -47,6 +47,52 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
	return af_alg_sendmsg(sock, msg, size, ivsize);
}

static int algif_skcipher_export(struct sock *sk, struct skcipher_request *req)
{
	struct alg_sock *ask = alg_sk(sk);
	struct crypto_skcipher *tfm;
	struct af_alg_ctx *ctx;
	struct alg_sock *pask;
	unsigned statesize;
	struct sock *psk;
	int err;

	if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
		return 0;

	ctx = ask->private;
	psk = ask->parent;
	pask = alg_sk(psk);
	tfm = pask->private;

	statesize = crypto_skcipher_statesize(tfm);
	ctx->state = sock_kmalloc(sk, statesize, GFP_ATOMIC);
	if (!ctx->state)
		return -ENOMEM;

	err = crypto_skcipher_export(req, ctx->state);
	if (err) {
		sock_kzfree_s(sk, ctx->state, statesize);
		ctx->state = NULL;
	}

	return err;
}

static void algif_skcipher_done(void *data, int err)
{
	struct af_alg_async_req *areq = data;
	struct sock *sk = areq->sk;

	if (err)
		goto out;

	err = algif_skcipher_export(sk, &areq->cra_u.skcipher_req);

out:
	af_alg_async_cb(data, err);
}

static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t ignored, int flags)
{
@@ -58,6 +104,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
	struct crypto_skcipher *tfm = pask->private;
	unsigned int bs = crypto_skcipher_chunksize(tfm);
	struct af_alg_async_req *areq;
	unsigned cflags = 0;
	int err = 0;
	size_t len = 0;

@@ -82,8 +129,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
	 * If more buffers are to be expected to be processed, process only
	 * full block size buffers.
	 */
	if (ctx->more || len < ctx->used)
	if (ctx->more || len < ctx->used) {
		len -= len % bs;
		cflags |= CRYPTO_SKCIPHER_REQ_NOTFINAL;
	}

	/*
	 * Create a per request TX SGL for this request which tracks the
@@ -107,6 +156,16 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
				   areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);

	if (ctx->state) {
		err = crypto_skcipher_import(&areq->cra_u.skcipher_req,
					     ctx->state);
		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
		ctx->state = NULL;
		if (err)
			goto free;
		cflags |= CRYPTO_SKCIPHER_REQ_CONT;
	}

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
@@ -116,8 +175,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
		areq->outlen = len;

		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      cflags |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      af_alg_async_cb, areq);
					      algif_skcipher_done, areq);
		err = ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
@@ -130,6 +190,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
	} else {
		/* Synchronous operation */
		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      cflags |
					      CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &ctx->wait);
@@ -137,8 +198,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
			&ctx->wait);
	}

		if (!err)
			err = algif_skcipher_export(
				sk, &areq->cra_u.skcipher_req);
	}

free:
	af_alg_free_resources(areq);
@@ -301,6 +365,8 @@ static void skcipher_sock_destruct(struct sock *sk)

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	if (ctx->state)
		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
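The export/import plumbing above is what lets a userspace client split
one cipher stream across several sendmsg/recvmsg round trips. A rough
userspace C sketch of the AF_ALG flow under those semantics (IV and
encrypt/decrypt selection via ALG_SET_IV/ALG_SET_OP ancillary data, plus
all error handling, omitted for brevity):

#include <linux/if_alg.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = { 0 };
	unsigned char buf[32] = { 0 };
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	/* First chunk: MSG_MORE marks the stream as unfinished, so the
	 * kernel tags the request NOTFINAL and exports the state. */
	send(opfd, buf, 16, MSG_MORE);
	read(opfd, buf, 16);

	/* Final chunk: the saved state is imported (REQ_CONT) and the
	 * operation completes as if done in one piece. */
	send(opfd, buf + 16, 16, 0);
	read(opfd, buf + 16, 16);

	close(opfd);
	close(tfmfd);
	return 0;
}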
@@ -14,6 +14,8 @@
#include <linux/module.h>
#include <linux/sched.h>

#define ARC4_ALIGN __alignof__(struct arc4_ctx)

static int crypto_arc4_setkey(struct crypto_lskcipher *tfm, const u8 *in_key,
			      unsigned int key_len)
{
@@ -23,10 +25,15 @@ static int crypto_arc4_setkey(struct crypto_lskcipher *tfm, const u8 *in_key,
}

static int crypto_arc4_crypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned nbytes, u8 *iv, bool final)
			     u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
{
	struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm);

	if (!(flags & CRYPTO_LSKCIPHER_FLAG_CONT))
		memcpy(siv, ctx, sizeof(*ctx));

	ctx = (struct arc4_ctx *)siv;

	arc4_crypt(ctx, dst, src, nbytes);
	return 0;
}
@@ -45,9 +52,11 @@ static struct lskcipher_alg arc4_alg = {
	.co.base.cra_priority		= 100,
	.co.base.cra_blocksize		= ARC4_BLOCK_SIZE,
	.co.base.cra_ctxsize		= sizeof(struct arc4_ctx),
	.co.base.cra_alignmask		= ARC4_ALIGN - 1,
	.co.base.cra_module		= THIS_MODULE,
	.co.min_keysize			= ARC4_MIN_KEY_SIZE,
	.co.max_keysize			= ARC4_MAX_KEY_SIZE,
	.co.statesize			= sizeof(struct arc4_ctx),
	.setkey				= crypto_arc4_setkey,
	.encrypt			= crypto_arc4_crypt,
	.decrypt			= crypto_arc4_crypt,
|
||||
}
|
||||
|
||||
static int crypto_cbc_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
|
||||
u8 *dst, unsigned len, u8 *iv, bool final)
|
||||
u8 *dst, unsigned len, u8 *iv, u32 flags)
|
||||
{
|
||||
struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
|
||||
bool final = flags & CRYPTO_LSKCIPHER_FLAG_FINAL;
|
||||
struct crypto_lskcipher *cipher = *ctx;
|
||||
int rem;
|
||||
|
||||
@ -119,9 +120,10 @@ out:
|
||||
}
|
||||
|
||||
static int crypto_cbc_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
|
||||
u8 *dst, unsigned len, u8 *iv, bool final)
|
||||
u8 *dst, unsigned len, u8 *iv, u32 flags)
|
||||
{
|
||||
struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
|
||||
bool final = flags & CRYPTO_LSKCIPHER_FLAG_FINAL;
|
||||
struct crypto_lskcipher *cipher = *ctx;
|
||||
int rem;
|
||||
|
||||
|
254	crypto/cfb.c
@@ -1,254 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * CFB: Cipher FeedBack mode
 *
 * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
 *
 * CFB is a stream cipher mode which is layered on to a block
 * encryption scheme. It works very much like a one time pad where
 * the pad is generated initially from the encrypted IV and then
 * subsequently from the encrypted previous block of ciphertext. The
 * pad is XOR'd into the plain text to get the final ciphertext.
 *
 * The scheme of CFB is best described by wikipedia:
 *
 * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
 *
 * Note that since the pad for both encryption and decryption is
 * generated by an encryption operation, CFB never uses the block
 * decryption function.
 */

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
{
	return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
}

static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
				   const u8 *src, u8 *dst)
{
	crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}

/* final encrypt and decrypt is the same */
static void crypto_cfb_final(struct skcipher_walk *walk,
			     struct crypto_skcipher *tfm)
{
	const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;
	unsigned int nbytes = walk->nbytes;

	crypto_cfb_encrypt_one(tfm, iv, stream);
	crypto_xor_cpy(dst, stream, src, nbytes);
}

static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_cfb_encrypt_one(tfm, iv, dst);
		crypto_xor(dst, src, bsize);
		iv = dst;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *iv = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE];

	do {
		crypto_cfb_encrypt_one(tfm, iv, tmp);
		crypto_xor(src, tmp, bsize);
		iv = src;

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

static int crypto_cfb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	unsigned int bsize = crypto_cfb_bsize(tfm);
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			err = crypto_cfb_encrypt_inplace(&walk, tfm);
		else
			err = crypto_cfb_encrypt_segment(&walk, tfm);
		err = skcipher_walk_done(&walk, err);
	}

	if (walk.nbytes) {
		crypto_cfb_final(&walk, tfm);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_cfb_encrypt_one(tfm, iv, dst);
		crypto_xor(dst, src, bsize);
		iv = src;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 * const iv = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE];

	do {
		crypto_cfb_encrypt_one(tfm, iv, tmp);
		memcpy(iv, src, bsize);
		crypto_xor(src, tmp, bsize);
		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
				     struct crypto_skcipher *tfm)
{
	if (walk->src.virt.addr == walk->dst.virt.addr)
		return crypto_cfb_decrypt_inplace(walk, tfm);
	else
		return crypto_cfb_decrypt_segment(walk, tfm);
}

static int crypto_cfb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		err = crypto_cfb_decrypt_blocks(&walk, tfm);
		err = skcipher_walk_done(&walk, err);
	}

	if (walk.nbytes) {
		crypto_cfb_final(&walk, tfm);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	alg = skcipher_ialg_simple(inst);

	/* CFB mode is a stream cipher. */
	inst->alg.base.cra_blocksize = 1;

	/*
	 * To simplify the implementation, configure the skcipher walk to only
	 * give a partial block at the very end, never earlier.
	 */
	inst->alg.chunksize = alg->cra_blocksize;

	inst->alg.encrypt = crypto_cfb_encrypt;
	inst->alg.decrypt = crypto_cfb_decrypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		inst->free(inst);

	return err;
}

static struct crypto_template crypto_cfb_tmpl = {
	.name = "cfb",
	.create = crypto_cfb_create,
	.module = THIS_MODULE,
};

static int __init crypto_cfb_module_init(void)
{
	return crypto_register_template(&crypto_cfb_tmpl);
}

static void __exit crypto_cfb_module_exit(void)
{
	crypto_unregister_template(&crypto_cfb_tmpl);
}

subsys_initcall(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cfb");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
@@ -111,9 +111,9 @@
 * as stdrng. Each DRBG receives an increasing cra_priority values the later
 * they are defined in this array (see drbg_fill_array).
 *
 * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and
 * the SHA256 / AES 256 over other ciphers. Thus, the favored
 * DRBGs are the latest entries in this array.
 * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and the
 * HMAC-SHA512 / SHA256 / AES 256 over other ciphers. Thus, the
 * favored DRBGs are the latest entries in this array.
 */
static const struct drbg_core drbg_cores[] = {
#ifdef CONFIG_CRYPTO_DRBG_CTR
@@ -139,12 +139,6 @@ static const struct drbg_core drbg_cores[] = {
#endif /* CONFIG_CRYPTO_DRBG_CTR */
#ifdef CONFIG_CRYPTO_DRBG_HASH
	{
		.flags = DRBG_HASH | DRBG_STRENGTH128,
		.statelen = 55, /* 440 bits */
		.blocklen_bytes = 20,
		.cra_name = "sha1",
		.backend_cra_name = "sha1",
	}, {
		.flags = DRBG_HASH | DRBG_STRENGTH256,
		.statelen = 111, /* 888 bits */
		.blocklen_bytes = 48,
@@ -166,12 +160,6 @@ static const struct drbg_core drbg_cores[] = {
#endif /* CONFIG_CRYPTO_DRBG_HASH */
#ifdef CONFIG_CRYPTO_DRBG_HMAC
	{
		.flags = DRBG_HMAC | DRBG_STRENGTH128,
		.statelen = 20, /* block length of cipher */
		.blocklen_bytes = 20,
		.cra_name = "hmac_sha1",
		.backend_cra_name = "hmac(sha1)",
	}, {
		.flags = DRBG_HMAC | DRBG_STRENGTH256,
		.statelen = 48, /* block length of cipher */
		.blocklen_bytes = 48,
@@ -648,8 +636,6 @@ MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha1");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha1");

/* update function of HMAC DRBG as defined in 10.1.2.2 */
static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
@@ -768,8 +754,6 @@ MODULE_ALIAS_CRYPTO("drbg_pr_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha256");
MODULE_ALIAS_CRYPTO("drbg_pr_sha1");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha1");

/*
 * Increment buffer
@@ -1475,11 +1459,11 @@ static int drbg_generate(struct drbg_state *drbg,
		int err = 0;
		pr_devel("DRBG: start to perform self test\n");
		if (drbg->core->flags & DRBG_HMAC)
			err = alg_test("drbg_pr_hmac_sha256",
				       "drbg_pr_hmac_sha256", 0, 0);
			err = alg_test("drbg_pr_hmac_sha512",
				       "drbg_pr_hmac_sha512", 0, 0);
		else if (drbg->core->flags & DRBG_CTR)
			err = alg_test("drbg_pr_ctr_aes128",
				       "drbg_pr_ctr_aes128", 0, 0);
			err = alg_test("drbg_pr_ctr_aes256",
				       "drbg_pr_ctr_aes256", 0, 0);
		else
			err = alg_test("drbg_pr_sha256",
				       "drbg_pr_sha256", 0, 0);
@@ -2017,11 +2001,13 @@ static inline int __init drbg_healthcheck_sanity(void)
		return 0;

#ifdef CONFIG_CRYPTO_DRBG_CTR
	drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
#elif defined CONFIG_CRYPTO_DRBG_HASH
	drbg_convert_tfm_core("drbg_nopr_ctr_aes256", &coreref, &pr);
#endif
#ifdef CONFIG_CRYPTO_DRBG_HASH
	drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
#else
	drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
#endif
#ifdef CONFIG_CRYPTO_DRBG_HMAC
	drbg_convert_tfm_core("drbg_nopr_hmac_sha512", &coreref, &pr);
#endif

	drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
10	crypto/ecb.c
@@ -32,22 +32,24 @@ static int crypto_ecb_crypt(struct crypto_cipher *cipher, const u8 *src,
}

static int crypto_ecb_encrypt2(struct crypto_lskcipher *tfm, const u8 *src,
			       u8 *dst, unsigned len, u8 *iv, bool final)
			       u8 *dst, unsigned len, u8 *iv, u32 flags)
{
	struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
	struct crypto_cipher *cipher = *ctx;

	return crypto_ecb_crypt(cipher, src, dst, len, final,
	return crypto_ecb_crypt(cipher, src, dst, len,
				flags & CRYPTO_LSKCIPHER_FLAG_FINAL,
				crypto_cipher_alg(cipher)->cia_encrypt);
}

static int crypto_ecb_decrypt2(struct crypto_lskcipher *tfm, const u8 *src,
			       u8 *dst, unsigned len, u8 *iv, bool final)
			       u8 *dst, unsigned len, u8 *iv, u32 flags)
{
	struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
	struct crypto_cipher *cipher = *ctx;

	return crypto_ecb_crypt(cipher, src, dst, len, final,
	return crypto_ecb_crypt(cipher, src, dst, len,
				flags & CRYPTO_LSKCIPHER_FLAG_FINAL,
				crypto_cipher_alg(cipher)->cia_decrypt);
}
|
||||
static int crypto_lskcipher_crypt_unaligned(
|
||||
struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
|
||||
u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
|
||||
u8 *dst, unsigned len, u8 *iv, bool final))
|
||||
u8 *dst, unsigned len, u8 *iv, u32 flags))
|
||||
{
|
||||
unsigned statesize = crypto_lskcipher_statesize(tfm);
|
||||
unsigned ivsize = crypto_lskcipher_ivsize(tfm);
|
||||
unsigned bs = crypto_lskcipher_blocksize(tfm);
|
||||
unsigned cs = crypto_lskcipher_chunksize(tfm);
|
||||
@ -104,7 +105,7 @@ static int crypto_lskcipher_crypt_unaligned(
|
||||
if (!tiv)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(tiv, iv, ivsize);
|
||||
memcpy(tiv, iv, ivsize + statesize);
|
||||
|
||||
p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
|
||||
err = -ENOMEM;
|
||||
@ -119,7 +120,7 @@ static int crypto_lskcipher_crypt_unaligned(
|
||||
chunk &= ~(cs - 1);
|
||||
|
||||
memcpy(p, src, chunk);
|
||||
err = crypt(tfm, p, p, chunk, tiv, true);
|
||||
err = crypt(tfm, p, p, chunk, tiv, CRYPTO_LSKCIPHER_FLAG_FINAL);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
@ -132,7 +133,7 @@ static int crypto_lskcipher_crypt_unaligned(
|
||||
err = len ? -EINVAL : 0;
|
||||
|
||||
out:
|
||||
memcpy(iv, tiv, ivsize);
|
||||
memcpy(iv, tiv, ivsize + statesize);
|
||||
kfree_sensitive(p);
|
||||
kfree_sensitive(tiv);
|
||||
return err;
|
||||
@ -143,7 +144,7 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
|
||||
int (*crypt)(struct crypto_lskcipher *tfm,
|
||||
const u8 *src, u8 *dst,
|
||||
unsigned len, u8 *iv,
|
||||
bool final))
|
||||
u32 flags))
|
||||
{
|
||||
unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
|
||||
struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
|
||||
@ -156,7 +157,7 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = crypt(tfm, src, dst, len, iv, true);
|
||||
ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
|
||||
|
||||
out:
|
||||
return crypto_lskcipher_errstat(alg, ret);
|
||||
@ -197,23 +198,45 @@ EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
|
||||
static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
|
||||
int (*crypt)(struct crypto_lskcipher *tfm,
|
||||
const u8 *src, u8 *dst,
|
||||
unsigned len, u8 *iv,
|
||||
bool final))
|
||||
unsigned len, u8 *ivs,
|
||||
u32 flags))
|
||||
{
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
|
||||
u8 *ivs = skcipher_request_ctx(req);
|
||||
struct crypto_lskcipher *tfm = *ctx;
|
||||
struct skcipher_walk walk;
|
||||
unsigned ivsize;
|
||||
u32 flags;
|
||||
int err;
|
||||
|
||||
ivsize = crypto_lskcipher_ivsize(tfm);
|
||||
ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1);
|
||||
|
||||
flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
if (req->base.flags & CRYPTO_SKCIPHER_REQ_CONT)
|
||||
flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
|
||||
else
|
||||
memcpy(ivs, req->iv, ivsize);
|
||||
|
||||
if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
|
||||
flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
|
||||
walk.nbytes, walk.iv, walk.nbytes == walk.total);
|
||||
walk.nbytes, ivs,
|
||||
flags & ~(walk.nbytes == walk.total ?
|
||||
0 : CRYPTO_LSKCIPHER_FLAG_FINAL));
|
||||
err = skcipher_walk_done(&walk, err);
|
||||
flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
|
||||
}
|
||||
|
||||
if (flags & CRYPTO_LSKCIPHER_FLAG_FINAL)
|
||||
memcpy(req->iv, ivs, ivsize);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -276,6 +299,7 @@ static void __maybe_unused crypto_lskcipher_show(
|
||||
seq_printf(m, "max keysize : %u\n", skcipher->co.max_keysize);
|
||||
seq_printf(m, "ivsize : %u\n", skcipher->co.ivsize);
|
||||
seq_printf(m, "chunksize : %u\n", skcipher->co.chunksize);
|
||||
seq_printf(m, "statesize : %u\n", skcipher->co.statesize);
|
||||
}
|
||||
|
||||
static int __maybe_unused crypto_lskcipher_report(
|
||||
@ -618,6 +642,7 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple(
|
||||
inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
|
||||
inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
|
||||
inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;
|
||||
inst->alg.co.statesize = cipher_alg->co.statesize;
|
||||
|
||||
/* Use struct crypto_lskcipher * by default, can be overridden */
|
||||
inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
|
||||
|
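The flag plumbing above is what makes incremental processing visible to skcipher users: a request can be marked as a non-final chunk and later continued from the state stashed in the request context. A rough usage sketch, assuming tfm and req are an already-keyed lskcipher-backed skcipher and a request whose src/dst the caller re-points at each chunk:

    /* First chunk: keep the chaining state, do not finalize. */
    skcipher_request_set_callback(req, CRYPTO_SKCIPHER_REQ_NOTFINAL,
                                  NULL, NULL);
    err = crypto_skcipher_encrypt(req);

    /* Final chunk: continue from the stashed state; req->iv is
     * written back once the FINAL flag is honoured. */
    skcipher_request_set_callback(req, CRYPTO_SKCIPHER_REQ_CONT,
                                  NULL, NULL);
    err = crypto_skcipher_encrypt(req);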
crypto/ofb.c
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * OFB: Output FeedBack mode
- *
- * Copyright (C) 2018 ARM Limited or its affiliates.
- * All rights reserved.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-static int crypto_ofb_crypt(struct skcipher_request *req)
-{
-    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-    struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
-    const unsigned int bsize = crypto_cipher_blocksize(cipher);
-    struct skcipher_walk walk;
-    int err;
-
-    err = skcipher_walk_virt(&walk, req, false);
-
-    while (walk.nbytes >= bsize) {
-        const u8 *src = walk.src.virt.addr;
-        u8 *dst = walk.dst.virt.addr;
-        u8 * const iv = walk.iv;
-        unsigned int nbytes = walk.nbytes;
-
-        do {
-            crypto_cipher_encrypt_one(cipher, iv, iv);
-            crypto_xor_cpy(dst, src, iv, bsize);
-            dst += bsize;
-            src += bsize;
-        } while ((nbytes -= bsize) >= bsize);
-
-        err = skcipher_walk_done(&walk, nbytes);
-    }
-
-    if (walk.nbytes) {
-        crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv);
-        crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv,
-                       walk.nbytes);
-        err = skcipher_walk_done(&walk, 0);
-    }
-
-    return err;
-}
-
-static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
-    struct skcipher_instance *inst;
-    struct crypto_alg *alg;
-    int err;
-
-    inst = skcipher_alloc_instance_simple(tmpl, tb);
-    if (IS_ERR(inst))
-        return PTR_ERR(inst);
-
-    alg = skcipher_ialg_simple(inst);
-
-    /* OFB mode is a stream cipher. */
-    inst->alg.base.cra_blocksize = 1;
-
-    /*
-     * To simplify the implementation, configure the skcipher walk to only
-     * give a partial block at the very end, never earlier.
-     */
-    inst->alg.chunksize = alg->cra_blocksize;
-
-    inst->alg.encrypt = crypto_ofb_crypt;
-    inst->alg.decrypt = crypto_ofb_crypt;
-
-    err = skcipher_register_instance(tmpl, inst);
-    if (err)
-        inst->free(inst);
-
-    return err;
-}
-
-static struct crypto_template crypto_ofb_tmpl = {
-    .name = "ofb",
-    .create = crypto_ofb_create,
-    .module = THIS_MODULE,
-};
-
-static int __init crypto_ofb_module_init(void)
-{
-    return crypto_register_template(&crypto_ofb_tmpl);
-}
-
-static void __exit crypto_ofb_module_exit(void)
-{
-    crypto_unregister_template(&crypto_ofb_tmpl);
-}
-
-subsys_initcall(crypto_ofb_module_init);
-module_exit(crypto_ofb_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("OFB block cipher mode of operation");
-MODULE_ALIAS_CRYPTO("ofb");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
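For anyone who still needs the mode outside the kernel, the deleted template boils down to a few lines: encrypt the IV in place to produce the next keystream block, then XOR it into the data (encryption and decryption are the same operation). A freestanding sketch with a caller-supplied block cipher primitive:

typedef unsigned char u8;

/* OFB over an arbitrary block cipher with block size bs. */
static void ofb_crypt(void (*encrypt_block)(u8 *out, const u8 *in),
                      u8 *iv, u8 *data, size_t len, size_t bs)
{
    while (len) {
        size_t i, n = len < bs ? len : bs;

        encrypt_block(iv, iv);      /* E_k(IV) becomes the next IV */
        for (i = 0; i < n; i++)
            data[i] ^= iv[i];       /* XOR keystream into the data */
        data += n;
        len -= n;
    }
}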
crypto/rsa.c
@@ -220,6 +220,8 @@ static int rsa_check_exponent_fips(MPI e)
     }
 
     e_max = mpi_alloc(0);
+    if (!e_max)
+        return -ENOMEM;
     mpi_set_bit(e_max, 256);
 
     if (mpi_cmp(e, e_max) >= 0) {
crypto/scomp.c
@@ -117,6 +117,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
     struct crypto_scomp *scomp = *tfm_ctx;
     void **ctx = acomp_request_ctx(req);
     struct scomp_scratch *scratch;
+    unsigned int dlen;
     int ret;
 
     if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
@@ -128,6 +129,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
     if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
         req->dlen = SCOMP_SCRATCH_SIZE;
 
+    dlen = req->dlen;
+
     scratch = raw_cpu_ptr(&scomp_scratch);
     spin_lock(&scratch->lock);
 
@@ -145,6 +148,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
                 ret = -ENOMEM;
                 goto out;
             }
+        } else if (req->dlen > dlen) {
+            ret = -ENOSPC;
+            goto out;
         }
         scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
                                  1);
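The buffer-overflow fix above is a generic pattern: latch the caller's output capacity before the backend is allowed to rewrite req->dlen, then refuse to copy more than that. Schematically, with a hypothetical helper (not the kernel function itself):

static int bounded_copy_out(struct acomp_req *req, void *scratch_dst,
                            unsigned int dlen_before)
{
    /* dlen_before was saved as dlen = req->dlen before the backend ran
     * and possibly enlarged req->dlen to the real output size */
    if (req->dlen > dlen_before)
        return -ENOSPC;             /* would overflow req->dst */

    scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen, 1);
    return 0;
}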
crypto/shash.c
@@ -23,12 +23,8 @@ static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
 
 static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
 {
-    if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-        return err;
-
-    if (err && err != -EINPROGRESS && err != -EBUSY)
+    if (IS_ENABLED(CONFIG_CRYPTO_STATS) && err)
         atomic64_inc(&shash_get_stat(alg)->err_cnt);
 
     return err;
 }
crypto/skcipher.c
@@ -698,6 +698,64 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
 
+static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
+{
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+    u8 *ivs = skcipher_request_ctx(req);
+
+    ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
+
+    memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
+           crypto_skcipher_statesize(tfm));
+
+    return 0;
+}
+
+static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
+{
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+    u8 *ivs = skcipher_request_ctx(req);
+
+    ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
+
+    memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
+           crypto_skcipher_statesize(tfm));
+
+    return 0;
+}
+
+static int skcipher_noexport(struct skcipher_request *req, void *out)
+{
+    return 0;
+}
+
+static int skcipher_noimport(struct skcipher_request *req, const void *in)
+{
+    return 0;
+}
+
+int crypto_skcipher_export(struct skcipher_request *req, void *out)
+{
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+
+    if (alg->co.base.cra_type != &crypto_skcipher_type)
+        return crypto_lskcipher_export(req, out);
+    return alg->export(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_skcipher_export);
+
+int crypto_skcipher_import(struct skcipher_request *req, const void *in)
+{
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+
+    if (alg->co.base.cra_type != &crypto_skcipher_type)
+        return crypto_lskcipher_import(req, in);
+    return alg->import(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_skcipher_import);
+
 static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
 {
     struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@@ -713,8 +771,17 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
 
     skcipher_set_needkey(skcipher);
 
-    if (tfm->__crt_alg->cra_type != &crypto_skcipher_type)
+    if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
+        unsigned am = crypto_skcipher_alignmask(skcipher);
+        unsigned reqsize;
+
+        reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
+        reqsize += crypto_skcipher_ivsize(skcipher);
+        reqsize += crypto_skcipher_statesize(skcipher);
+        crypto_skcipher_set_reqsize(skcipher, reqsize);
+
         return crypto_init_lskcipher_ops_sg(tfm);
+    }
 
     if (alg->exit)
         skcipher->base.exit = crypto_skcipher_exit_tfm;
@@ -756,6 +823,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
     seq_printf(m, "ivsize : %u\n", skcipher->ivsize);
     seq_printf(m, "chunksize : %u\n", skcipher->chunksize);
     seq_printf(m, "walksize : %u\n", skcipher->walksize);
+    seq_printf(m, "statesize : %u\n", skcipher->statesize);
 }
 
 static int __maybe_unused crypto_skcipher_report(
@@ -870,7 +938,9 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
     struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
     struct crypto_alg *base = &alg->base;
 
-    if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
+    if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
+        alg->statesize > PAGE_SIZE / 2 ||
+        (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
         return -EINVAL;
 
     if (!alg->chunksize)
@@ -899,6 +969,12 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
     if (!alg->walksize)
         alg->walksize = alg->chunksize;
 
+    if (!alg->statesize) {
+        alg->import = skcipher_noimport;
+        alg->export = skcipher_noexport;
+    } else if (!(alg->import && alg->export))
+        return -EINVAL;
+
     base->cra_type = &crypto_skcipher_type;
     base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
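The new request-level export/import pairs with the incremental flags: a caller can park a half-processed stream and resume it later. A sketch of the intended flow, assuming tfm is keyed and state points at crypto_skcipher_statesize(tfm) bytes; the exact interaction of import with the continuation flag is up to the backing algorithm:

    skcipher_request_set_callback(req, CRYPTO_SKCIPHER_REQ_NOTFINAL,
                                  NULL, NULL);
    err = crypto_skcipher_encrypt(req);          /* first part */
    if (!err)
        err = crypto_skcipher_export(req, state); /* stash chaining state */

    /* ... later, restore and continue to the end ... */
    err = crypto_skcipher_import(req, state);
    skcipher_request_set_callback(req, CRYPTO_SKCIPHER_REQ_CONT,
                                  NULL, NULL);
    err = crypto_skcipher_encrypt(req);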
crypto/tcrypt.c
@@ -1524,8 +1524,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
         ret = min(ret, tcrypt_test("xts(aes)"));
         ret = min(ret, tcrypt_test("ctr(aes)"));
         ret = min(ret, tcrypt_test("rfc3686(ctr(aes))"));
-        ret = min(ret, tcrypt_test("ofb(aes)"));
-        ret = min(ret, tcrypt_test("cfb(aes)"));
         ret = min(ret, tcrypt_test("xctr(aes)"));
         break;
 
@@ -1845,14 +1843,12 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
     case 191:
         ret = min(ret, tcrypt_test("ecb(sm4)"));
         ret = min(ret, tcrypt_test("cbc(sm4)"));
-        ret = min(ret, tcrypt_test("cfb(sm4)"));
         ret = min(ret, tcrypt_test("ctr(sm4)"));
         ret = min(ret, tcrypt_test("xts(sm4)"));
         break;
     case 192:
         ret = min(ret, tcrypt_test("ecb(aria)"));
         ret = min(ret, tcrypt_test("cbc(aria)"));
-        ret = min(ret, tcrypt_test("cfb(aria)"));
         ret = min(ret, tcrypt_test("ctr(aria)"));
         break;
     case 200:
@@ -1880,10 +1876,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                 speed_template_16_24_32);
         test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
                 speed_template_16_24_32);
-        test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
-                speed_template_16_24_32);
-        test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
-                speed_template_16_24_32);
         break;
 
     case 201:
@@ -2115,10 +2107,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                 speed_template_16);
         test_cipher_speed("cts(cbc(sm4))", DECRYPT, sec, NULL, 0,
                 speed_template_16);
-        test_cipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
-                speed_template_16);
-        test_cipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
-                speed_template_16);
         test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
                 speed_template_16);
         test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
@@ -2198,10 +2186,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                 speed_template_16_24_32);
         test_cipher_speed("cbc(aria)", DECRYPT, sec, NULL, 0,
                 speed_template_16_24_32);
-        test_cipher_speed("cfb(aria)", ENCRYPT, sec, NULL, 0,
-                speed_template_16_24_32);
-        test_cipher_speed("cfb(aria)", DECRYPT, sec, NULL, 0,
-                speed_template_16_24_32);
         test_cipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
                 speed_template_16_24_32);
         test_cipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
@@ -2436,14 +2420,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                 speed_template_16_24_32);
         test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
                 speed_template_16_24_32);
-        test_acipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
-                speed_template_16_24_32);
-        test_acipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
-                speed_template_16_24_32);
-        test_acipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
-                speed_template_16_24_32);
-        test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
-                speed_template_16_24_32);
         test_acipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0,
                 speed_template_20_28_36);
         test_acipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 0,
@@ -2463,18 +2439,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
         test_acipher_speed("cbc(des3_ede)", DECRYPT, sec,
                 des3_speed_template, DES3_SPEED_VECTORS,
                 speed_template_24);
-        test_acipher_speed("cfb(des3_ede)", ENCRYPT, sec,
-                des3_speed_template, DES3_SPEED_VECTORS,
-                speed_template_24);
-        test_acipher_speed("cfb(des3_ede)", DECRYPT, sec,
-                des3_speed_template, DES3_SPEED_VECTORS,
-                speed_template_24);
-        test_acipher_speed("ofb(des3_ede)", ENCRYPT, sec,
-                des3_speed_template, DES3_SPEED_VECTORS,
-                speed_template_24);
-        test_acipher_speed("ofb(des3_ede)", DECRYPT, sec,
-                des3_speed_template, DES3_SPEED_VECTORS,
-                speed_template_24);
         break;
 
     case 502:
@@ -2486,14 +2450,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                 speed_template_8);
         test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
                 speed_template_8);
-        test_acipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
-                speed_template_8);
-        test_acipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
-                speed_template_8);
-        test_acipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
-                speed_template_8);
-        test_acipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
-                speed_template_8);
         break;
 
     case 503:
@@ -2632,10 +2588,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                 speed_template_16);
         test_acipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
                 speed_template_16);
-        test_acipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
-                speed_template_16);
-        test_acipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
-                speed_template_16);
         test_acipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
                 speed_template_16);
         test_acipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
@@ -2682,14 +2634,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                     speed_template_16_24_32, num_mb);
         test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
                     speed_template_16_24_32, num_mb);
-        test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
-                    speed_template_16_24_32, num_mb);
-        test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
-                    speed_template_16_24_32, num_mb);
-        test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
-                    speed_template_16_24_32, num_mb);
-        test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
-                    speed_template_16_24_32, num_mb);
         test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL,
                     0, speed_template_20_28_36, num_mb);
         test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL,
@@ -2709,18 +2653,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
         test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec,
                     des3_speed_template, DES3_SPEED_VECTORS,
                     speed_template_24, num_mb);
-        test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec,
-                    des3_speed_template, DES3_SPEED_VECTORS,
-                    speed_template_24, num_mb);
-        test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec,
-                    des3_speed_template, DES3_SPEED_VECTORS,
-                    speed_template_24, num_mb);
-        test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec,
-                    des3_speed_template, DES3_SPEED_VECTORS,
-                    speed_template_24, num_mb);
-        test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec,
-                    des3_speed_template, DES3_SPEED_VECTORS,
-                    speed_template_24, num_mb);
         break;
 
     case 602:
@@ -2732,14 +2664,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                     speed_template_8, num_mb);
         test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
                     speed_template_8, num_mb);
-        test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
-                    speed_template_8, num_mb);
-        test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
-                    speed_template_8, num_mb);
-        test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
-                    speed_template_8, num_mb);
-        test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
-                    speed_template_8, num_mb);
         break;
 
     case 603:
crypto/testmgr.c
@@ -4608,25 +4608,6 @@ static const struct alg_test_desc alg_test_descs[] = {
             .einval_allowed = 1,
             }
         }
     }, {
-        .alg = "cfb(aes)",
-        .test = alg_test_skcipher,
-        .fips_allowed = 1,
-        .suite = {
-            .cipher = __VECS(aes_cfb_tv_template)
-        },
-    }, {
-        .alg = "cfb(aria)",
-        .test = alg_test_skcipher,
-        .suite = {
-            .cipher = __VECS(aria_cfb_tv_template)
-        },
-    }, {
-        .alg = "cfb(sm4)",
-        .test = alg_test_skcipher,
-        .suite = {
-            .cipher = __VECS(sm4_cfb_tv_template)
-        }
-    }, {
         .alg = "chacha20",
         .test = alg_test_skcipher,
@@ -4815,6 +4796,16 @@ static const struct alg_test_desc alg_test_descs[] = {
             .decomp = __VECS(deflate_decomp_tv_template)
             }
         }
     }, {
+        .alg = "deflate-iaa",
+        .test = alg_test_comp,
+        .fips_allowed = 1,
+        .suite = {
+            .comp = {
+                .comp = __VECS(deflate_comp_tv_template),
+                .decomp = __VECS(deflate_decomp_tv_template)
+            }
+        }
+    }, {
         .alg = "dh",
         .test = alg_test_kpp,
@@ -4845,14 +4836,6 @@ static const struct alg_test_desc alg_test_descs[] = {
         .suite = {
             .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
         }
     }, {
-        /*
-         * There is no need to specifically test the DRBG with every
-         * backend cipher -- covered by drbg_nopr_hmac_sha256 test
-         */
-        .alg = "drbg_nopr_hmac_sha1",
-        .fips_allowed = 1,
-        .test = alg_test_null,
-    }, {
         .alg = "drbg_nopr_hmac_sha256",
         .test = alg_test_drbg,
@@ -4861,7 +4844,10 @@ static const struct alg_test_desc alg_test_descs[] = {
             .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
         }
     }, {
-        /* covered by drbg_nopr_hmac_sha256 test */
+        /*
+         * There is no need to specifically test the DRBG with every
+         * backend cipher -- covered by drbg_nopr_hmac_sha512 test
+         */
         .alg = "drbg_nopr_hmac_sha384",
         .test = alg_test_null,
     }, {
@@ -4871,10 +4857,6 @@ static const struct alg_test_desc alg_test_descs[] = {
         .suite = {
             .drbg = __VECS(drbg_nopr_hmac_sha512_tv_template)
         }
     }, {
-        .alg = "drbg_nopr_sha1",
-        .fips_allowed = 1,
-        .test = alg_test_null,
-    }, {
         .alg = "drbg_nopr_sha256",
         .test = alg_test_drbg,
@@ -4906,10 +4888,6 @@ static const struct alg_test_desc alg_test_descs[] = {
         .alg = "drbg_pr_ctr_aes256",
         .fips_allowed = 1,
         .test = alg_test_null,
     }, {
-        .alg = "drbg_pr_hmac_sha1",
-        .fips_allowed = 1,
-        .test = alg_test_null,
-    }, {
         .alg = "drbg_pr_hmac_sha256",
         .test = alg_test_drbg,
@@ -4925,10 +4903,6 @@ static const struct alg_test_desc alg_test_descs[] = {
         .alg = "drbg_pr_hmac_sha512",
         .test = alg_test_null,
         .fips_allowed = 1,
     }, {
-        .alg = "drbg_pr_sha1",
-        .fips_allowed = 1,
-        .test = alg_test_null,
-    }, {
         .alg = "drbg_pr_sha256",
         .test = alg_test_drbg,
@@ -5419,26 +5393,6 @@ static const struct alg_test_desc alg_test_descs[] = {
         .suite = {
             .hash = __VECS(nhpoly1305_tv_template)
         }
     }, {
-        .alg = "ofb(aes)",
-        .test = alg_test_skcipher,
-        .fips_allowed = 1,
-        .suite = {
-            .cipher = __VECS(aes_ofb_tv_template)
-        }
-    }, {
-        /* Same as ofb(aes) except the key is stored in
-         * hardware secure memory which we reference by index
-         */
-        .alg = "ofb(paes)",
-        .test = alg_test_null,
-        .fips_allowed = 1,
-    }, {
-        .alg = "ofb(sm4)",
-        .test = alg_test_skcipher,
-        .suite = {
-            .cipher = __VECS(sm4_ofb_tv_template)
-        }
-    }, {
         .alg = "pcbc(fcrypt)",
         .test = alg_test_skcipher,
crypto/testmgr.h
(1148 lines changed; diff suppressed because it is too large)
drivers/char/hw_random/atmel-rng.c
@@ -161,15 +161,13 @@ static int atmel_trng_probe(struct platform_device *pdev)
     return ret;
 }
 
-static int atmel_trng_remove(struct platform_device *pdev)
+static void atmel_trng_remove(struct platform_device *pdev)
 {
     struct atmel_trng *trng = platform_get_drvdata(pdev);
 
     atmel_trng_cleanup(trng);
     pm_runtime_disable(&pdev->dev);
     pm_runtime_set_suspended(&pdev->dev);
-
-    return 0;
 }
 
 static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev)
@@ -218,7 +216,7 @@ MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
 
 static struct platform_driver atmel_trng_driver = {
     .probe = atmel_trng_probe,
-    .remove = atmel_trng_remove,
+    .remove_new = atmel_trng_remove,
     .driver = {
         .name = "atmel-trng",
         .pm = pm_ptr(&atmel_trng_pm_ops),
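All of the hw_random conversions that follow use one recipe: a ->remove() that could only ever return 0 becomes a void ->remove_new(), since the driver core ignores the return value and the device goes away regardless. The shape, for a made-up driver (foo_* names are illustrative only):

#include <linux/platform_device.h>

static void foo_remove(struct platform_device *pdev)
{
    struct foo *priv = platform_get_drvdata(pdev);

    foo_hw_shutdown(priv);      /* undo probe; nothing to report */
}

static struct platform_driver foo_driver = {
    .probe      = foo_probe,
    .remove_new = foo_remove,   /* void-returning callback */
    .driver     = { .name = "foo" },
};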
drivers/char/hw_random/cctrng.c
@@ -560,7 +560,7 @@ post_pm_err:
     return rc;
 }
 
-static int cctrng_remove(struct platform_device *pdev)
+static void cctrng_remove(struct platform_device *pdev)
 {
     struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
     struct device *dev = &pdev->dev;
@@ -570,8 +570,6 @@ static int cctrng_remove(struct platform_device *pdev)
     cc_trng_pm_fini(drvdata);
 
     dev_info(dev, "ARM cctrng device terminated\n");
-
-    return 0;
 }
 
 static int __maybe_unused cctrng_suspend(struct device *dev)
@@ -654,7 +652,7 @@ static struct platform_driver cctrng_driver = {
         .pm = &cctrng_pm,
     },
     .probe = cctrng_probe,
-    .remove = cctrng_remove,
+    .remove_new = cctrng_remove,
 };
 
 module_platform_driver(cctrng_driver);
drivers/char/hw_random/core.c
@@ -23,10 +23,13 @@
 #include <linux/sched.h>
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/uaccess.h>
 
 #define RNG_MODULE_NAME		"hw_random"
 
+#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
+
 static struct hwrng *current_rng;
 /* the current rng has been explicitly chosen by user via sysfs */
 static int cur_rng_set_by_user;
@@ -58,7 +61,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
 
 static size_t rng_buffer_size(void)
 {
-    return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
+    return RNG_BUFFER_SIZE;
 }
 
 static void add_early_randomness(struct hwrng *rng)
@@ -209,6 +212,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
 static ssize_t rng_dev_read(struct file *filp, char __user *buf,
                             size_t size, loff_t *offp)
 {
+    u8 buffer[RNG_BUFFER_SIZE];
     ssize_t ret = 0;
     int err = 0;
     int bytes_read, len;
@@ -236,34 +240,37 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
         if (bytes_read < 0) {
             err = bytes_read;
             goto out_unlock_reading;
-        }
-        data_avail = bytes_read;
-    }
-
-    if (!data_avail) {
-        if (filp->f_flags & O_NONBLOCK) {
+        } else if (bytes_read == 0 &&
+                   (filp->f_flags & O_NONBLOCK)) {
             err = -EAGAIN;
             goto out_unlock_reading;
         }
-    } else {
-        len = data_avail;
+
+        data_avail = bytes_read;
+    }
+
+    len = data_avail;
+    if (len) {
         if (len > size)
             len = size;
 
         data_avail -= len;
 
-        if (copy_to_user(buf + ret, rng_buffer + data_avail,
-                         len)) {
+        memcpy(buffer, rng_buffer + data_avail, len);
+    }
+    mutex_unlock(&reading_mutex);
+    put_rng(rng);
+
+    if (len) {
+        if (copy_to_user(buf + ret, buffer, len)) {
             err = -EFAULT;
-            goto out_unlock_reading;
+            goto out;
         }
 
         size -= len;
         ret += len;
     }
 
-    mutex_unlock(&reading_mutex);
-    put_rng(rng);
-
     if (need_resched())
         schedule_timeout_interruptible(1);
@@ -274,6 +281,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
         }
     }
 out:
+    memzero_explicit(buffer, sizeof(buffer));
     return ret ? : err;
 
 out_unlock_reading:
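The rng_dev_read() rework above exists because copy_to_user() may fault and sleep while reading_mutex is held; copying into an on-stack bounce buffer first lets the core drop the lock before touching userspace memory. Condensed shape of the new flow (names from the driver, logic abridged):

    u8 buffer[RNG_BUFFER_SIZE];

    mutex_lock(&reading_mutex);
    /* ... refill rng_buffer, compute len ... */
    memcpy(buffer, rng_buffer + data_avail, len);
    mutex_unlock(&reading_mutex);
    put_rng(rng);

    if (copy_to_user(buf, buffer, len))
        err = -EFAULT;

    memzero_explicit(buffer, sizeof(buffer));   /* don't leak RNG bytes */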
drivers/char/hw_random/exynos-trng.c
@@ -173,7 +173,7 @@ err_pm_get:
     return ret;
 }
 
-static int exynos_trng_remove(struct platform_device *pdev)
+static void exynos_trng_remove(struct platform_device *pdev)
 {
     struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
 
@@ -181,8 +181,6 @@ static int exynos_trng_remove(struct platform_device *pdev)
 
     pm_runtime_put_sync(&pdev->dev);
     pm_runtime_disable(&pdev->dev);
-
-    return 0;
 }
 
 static int exynos_trng_suspend(struct device *dev)
@@ -223,7 +221,7 @@ static struct platform_driver exynos_trng_driver = {
         .of_match_table = exynos_trng_dt_match,
     },
     .probe = exynos_trng_probe,
-    .remove = exynos_trng_remove,
+    .remove_new = exynos_trng_remove,
 };
 
 module_platform_driver(exynos_trng_driver);
drivers/char/hw_random/ingenic-rng.c
@@ -11,7 +11,7 @@
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -114,15 +114,13 @@ static int ingenic_rng_probe(struct platform_device *pdev)
     return 0;
 }
 
-static int ingenic_rng_remove(struct platform_device *pdev)
+static void ingenic_rng_remove(struct platform_device *pdev)
 {
     struct ingenic_rng *priv = platform_get_drvdata(pdev);
 
     hwrng_unregister(&priv->rng);
 
     writel(0, priv->base + RNG_REG_ERNG_OFFSET);
-
-    return 0;
 }
 
 static const struct of_device_id ingenic_rng_of_match[] = {
@@ -134,7 +132,7 @@ MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
 
 static struct platform_driver ingenic_rng_driver = {
     .probe = ingenic_rng_probe,
-    .remove = ingenic_rng_remove,
+    .remove_new = ingenic_rng_remove,
     .driver = {
         .name = "ingenic-rng",
         .of_match_table = ingenic_rng_of_match,
drivers/char/hw_random/jh7110-trng.c
@@ -300,7 +300,7 @@ static int starfive_trng_probe(struct platform_device *pdev)
     ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name,
                            (void *)trng);
     if (ret)
-        return dev_err_probe(&pdev->dev, irq,
+        return dev_err_probe(&pdev->dev, ret,
                              "Failed to register interrupt handler\n");
 
     trng->hclk = devm_clk_get(&pdev->dev, "hclk");
@@ -369,8 +369,12 @@ static int __maybe_unused starfive_trng_resume(struct device *dev)
     return 0;
 }
 
-static DEFINE_SIMPLE_DEV_PM_OPS(starfive_trng_pm_ops, starfive_trng_suspend,
-                                starfive_trng_resume);
+static const struct dev_pm_ops starfive_trng_pm_ops = {
+    SET_SYSTEM_SLEEP_PM_OPS(starfive_trng_suspend,
+                            starfive_trng_resume)
+    SET_RUNTIME_PM_OPS(starfive_trng_suspend,
+                       starfive_trng_resume, NULL)
+};
 
 static const struct of_device_id trng_dt_ids[] __maybe_unused = {
     { .compatible = "starfive,jh7110-trng" },
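The one-character starfive fix is worth spelling out: dev_err_probe() takes the error code as its second argument, logs it (or silently records a deferral reason for -EPROBE_DEFER) and returns that same code. Passing the IRQ number there turned a real failure into a bogus positive "error". Correct usage, with a hypothetical handler and context pointer:

    ret = devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
                           pdev->name, priv);
    if (ret)
        return dev_err_probe(&pdev->dev, ret,
                             "Failed to register interrupt handler\n");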
drivers/char/hw_random/ks-sa-rng.c
@@ -241,12 +241,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
     return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
 }
 
-static int ks_sa_rng_remove(struct platform_device *pdev)
+static void ks_sa_rng_remove(struct platform_device *pdev)
 {
     pm_runtime_put_sync(&pdev->dev);
     pm_runtime_disable(&pdev->dev);
-
-    return 0;
 }
 
 static const struct of_device_id ks_sa_rng_dt_match[] = {
@@ -263,7 +261,7 @@ static struct platform_driver ks_sa_rng_driver = {
         .of_match_table = ks_sa_rng_dt_match,
     },
     .probe = ks_sa_rng_probe,
-    .remove = ks_sa_rng_remove,
+    .remove_new = ks_sa_rng_remove,
 };
 
 module_platform_driver(ks_sa_rng_driver);
drivers/char/hw_random/mxc-rnga.c
@@ -176,15 +176,13 @@ err_ioremap:
     return err;
 }
 
-static int __exit mxc_rnga_remove(struct platform_device *pdev)
+static void __exit mxc_rnga_remove(struct platform_device *pdev)
 {
     struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
 
     hwrng_unregister(&mxc_rng->rng);
 
     clk_disable_unprepare(mxc_rng->clk);
-
-    return 0;
 }
 
 static const struct of_device_id mxc_rnga_of_match[] = {
@@ -199,7 +197,7 @@ static struct platform_driver mxc_rnga_driver = {
         .name = "mxc_rnga",
         .of_match_table = mxc_rnga_of_match,
     },
-    .remove = __exit_p(mxc_rnga_remove),
+    .remove_new = __exit_p(mxc_rnga_remove),
 };
 
 module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe);
drivers/char/hw_random/n2-drv.c
@@ -781,7 +781,7 @@ out:
     return err;
 }
 
-static int n2rng_remove(struct platform_device *op)
+static void n2rng_remove(struct platform_device *op)
 {
     struct n2rng *np = platform_get_drvdata(op);
 
@@ -790,8 +790,6 @@ static int n2rng_remove(struct platform_device *op)
     cancel_delayed_work_sync(&np->work);
 
     sun4v_hvapi_unregister(HV_GRP_RNG);
-
-    return 0;
 }
 
 static struct n2rng_template n2_template = {
@@ -860,7 +858,7 @@ static struct platform_driver n2rng_driver = {
         .of_match_table = n2rng_match,
     },
     .probe = n2rng_probe,
-    .remove = n2rng_remove,
+    .remove_new = n2rng_remove,
 };
 
 module_platform_driver(n2rng_driver);
drivers/char/hw_random/npcm-rng.c
@@ -126,15 +126,13 @@ static int npcm_rng_probe(struct platform_device *pdev)
     return 0;
 }
 
-static int npcm_rng_remove(struct platform_device *pdev)
+static void npcm_rng_remove(struct platform_device *pdev)
 {
     struct npcm_rng *priv = platform_get_drvdata(pdev);
 
     devm_hwrng_unregister(&pdev->dev, &priv->rng);
     pm_runtime_disable(&pdev->dev);
     pm_runtime_set_suspended(&pdev->dev);
-
-    return 0;
 }
 
 #ifdef CONFIG_PM
@@ -178,7 +176,7 @@ static struct platform_driver npcm_rng_driver = {
         .of_match_table = of_match_ptr(rng_dt_id),
     },
     .probe = npcm_rng_probe,
-    .remove = npcm_rng_remove,
+    .remove_new = npcm_rng_remove,
 };
 
 module_platform_driver(npcm_rng_driver);
drivers/char/hw_random/omap-rng.c
@@ -509,7 +509,7 @@ err_ioremap:
     return ret;
 }
 
-static int omap_rng_remove(struct platform_device *pdev)
+static void omap_rng_remove(struct platform_device *pdev)
 {
     struct omap_rng_dev *priv = platform_get_drvdata(pdev);
 
@@ -521,8 +521,6 @@ static int omap_rng_remove(struct platform_device *pdev)
 
     clk_disable_unprepare(priv->clk);
     clk_disable_unprepare(priv->clk_reg);
-
-    return 0;
 }
 
 static int __maybe_unused omap_rng_suspend(struct device *dev)
@@ -560,7 +558,7 @@ static struct platform_driver omap_rng_driver = {
         .of_match_table = of_match_ptr(omap_rng_of_match),
     },
     .probe = omap_rng_probe,
-    .remove = omap_rng_remove,
+    .remove_new = omap_rng_remove,
 };
 
 module_platform_driver(omap_rng_driver);
drivers/char/hw_random/stm32-rng.c
@@ -325,6 +325,7 @@ static int stm32_rng_init(struct hwrng *rng)
                             (!(reg & RNG_CR_CONDRST)),
                             10, 50000);
     if (err) {
+        clk_disable_unprepare(priv->clk);
         dev_err((struct device *)priv->rng.priv,
                 "%s: timeout %x!\n", __func__, reg);
         return -EINVAL;
@@ -362,11 +363,9 @@ static int stm32_rng_init(struct hwrng *rng)
     return 0;
 }
 
-static int stm32_rng_remove(struct platform_device *ofdev)
+static void stm32_rng_remove(struct platform_device *ofdev)
 {
     pm_runtime_disable(&ofdev->dev);
-
-    return 0;
 }
 
 static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
@@ -557,7 +556,7 @@ static struct platform_driver stm32_rng_driver = {
         .of_match_table = stm32_rng_match,
     },
     .probe = stm32_rng_probe,
-    .remove = stm32_rng_remove,
+    .remove_new = stm32_rng_remove,
 };
 
 module_platform_driver(stm32_rng_driver);
drivers/char/hw_random/timeriomem-rng.c
@@ -174,13 +174,11 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
     return 0;
 }
 
-static int timeriomem_rng_remove(struct platform_device *pdev)
+static void timeriomem_rng_remove(struct platform_device *pdev)
 {
     struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
 
     hrtimer_cancel(&priv->timer);
-
-    return 0;
 }
 
 static const struct of_device_id timeriomem_rng_match[] = {
@@ -195,7 +193,7 @@ static struct platform_driver timeriomem_rng_driver = {
         .of_match_table = timeriomem_rng_match,
     },
     .probe = timeriomem_rng_probe,
-    .remove = timeriomem_rng_remove,
+    .remove_new = timeriomem_rng_remove,
 };
 
 module_platform_driver(timeriomem_rng_driver);
drivers/char/hw_random/virtio-rng.c
@@ -135,7 +135,7 @@ static int probe_common(struct virtio_device *vdev)
     if (!vi)
         return -ENOMEM;
 
-    vi->index = index = ida_simple_get(&rng_index_ida, 0, 0, GFP_KERNEL);
+    vi->index = index = ida_alloc(&rng_index_ida, GFP_KERNEL);
     if (index < 0) {
         err = index;
         goto err_ida;
@@ -166,7 +166,7 @@ static int probe_common(struct virtio_device *vdev)
     return 0;
 
 err_find:
-    ida_simple_remove(&rng_index_ida, index);
+    ida_free(&rng_index_ida, index);
 err_ida:
     kfree(vi);
     return err;
@@ -184,7 +184,7 @@ static void remove_common(struct virtio_device *vdev)
     hwrng_unregister(&vi->hwrng);
     virtio_reset_device(vdev);
     vdev->config->del_vqs(vdev);
-    ida_simple_remove(&rng_index_ida, vi->index);
+    ida_free(&rng_index_ida, vi->index);
     kfree(vi);
 }
 
@@ -208,7 +208,6 @@ static void virtrng_scan(struct virtio_device *vdev)
     vi->hwrng_register_done = true;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int virtrng_freeze(struct virtio_device *vdev)
 {
     remove_common(vdev);
@@ -238,7 +237,6 @@ static int virtrng_restore(struct virtio_device *vdev)
 
     return err;
 }
-#endif
 
 static const struct virtio_device_id id_table[] = {
     { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
@@ -252,10 +250,8 @@ static struct virtio_driver virtio_rng_driver = {
     .probe = virtrng_probe,
     .remove = virtrng_remove,
     .scan = virtrng_scan,
-#ifdef CONFIG_PM_SLEEP
-    .freeze = virtrng_freeze,
-    .restore = virtrng_restore,
-#endif
+    .freeze = pm_sleep_ptr(virtrng_freeze),
+    .restore = pm_sleep_ptr(virtrng_restore),
 };
 
 module_virtio_driver(virtio_rng_driver);
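The ida_simple_*() calls replaced above were thin, deprecated wrappers; with a zero "end" bound, ida_simple_get(&ida, 0, 0, gfp) is exactly ida_alloc(&ida, gfp). Equivalent minimal usage, as a self-contained sketch:

#include <linux/idr.h>

static DEFINE_IDA(demo_ida);

static int demo_get_index(void)
{
    int index = ida_alloc(&demo_ida, GFP_KERNEL);

    if (index < 0)
        return index;           /* -ENOMEM or -ENOSPC */

    /* ... use the index; release it when done ... */
    ida_free(&demo_ida, index);
    return 0;
}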
drivers/char/hw_random/xgene-rng.c
@@ -357,15 +357,13 @@ static int xgene_rng_probe(struct platform_device *pdev)
     return 0;
 }
 
-static int xgene_rng_remove(struct platform_device *pdev)
+static void xgene_rng_remove(struct platform_device *pdev)
 {
     int rc;
 
     rc = device_init_wakeup(&pdev->dev, 0);
     if (rc)
         dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
-
-    return 0;
 }
 
 static const struct of_device_id xgene_rng_of_match[] = {
@@ -377,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
 
 static struct platform_driver xgene_rng_driver = {
     .probe = xgene_rng_probe,
-    .remove = xgene_rng_remove,
+    .remove_new = xgene_rng_remove,
     .driver = {
         .name = "xgene-rng",
         .of_match_table = xgene_rng_of_match,
drivers/crypto/Kconfig
@@ -306,6 +306,7 @@ config CRYPTO_DEV_SAHARA
     select CRYPTO_SKCIPHER
     select CRYPTO_AES
     select CRYPTO_ECB
+    select CRYPTO_ENGINE
     help
       This option enables support for the SAHARA HW crypto accelerator
       found in some Freescale i.MX chips.
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -431,8 +431,8 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
         return PTR_ERR(op->fallback_tfm);
     }
 
-    sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
-                     crypto_skcipher_reqsize(op->fallback_tfm);
+    crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
+                                crypto_skcipher_reqsize(op->fallback_tfm));
 
     memcpy(algt->fbname,
            crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -405,9 +405,8 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
         return PTR_ERR(op->fallback_tfm);
     }
 
-    sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
-                     crypto_skcipher_reqsize(op->fallback_tfm);
-
+    crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
+                                crypto_skcipher_reqsize(op->fallback_tfm));
 
     memcpy(algt->fbname,
            crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -30,33 +30,16 @@ static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
                             unsigned int keylen)
 {
     struct crypto_shash *xtfm;
-    struct shash_desc *sdesc;
-    size_t len;
-    int ret = 0;
+    int ret;
 
     xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
     if (IS_ERR(xtfm))
         return PTR_ERR(xtfm);
 
-    len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
-    sdesc = kmalloc(len, GFP_KERNEL);
-    if (!sdesc) {
-        ret = -ENOMEM;
-        goto err_hashkey_sdesc;
-    }
-    sdesc->tfm = xtfm;
-
-    ret = crypto_shash_init(sdesc);
-    if (ret) {
-        dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
-        goto err_hashkey;
-    }
-    ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
+    ret = crypto_shash_tfm_digest(xtfm, key, keylen, tfmctx->key);
     if (ret)
-        dev_err(tfmctx->ss->dev, "shash finup error\n");
-err_hashkey:
-    kfree(sdesc);
-err_hashkey_sdesc:
+        dev_err(tfmctx->ss->dev, "shash digest error ret=%d\n", ret);
+
     crypto_free_shash(xtfm);
     return ret;
 }
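crypto_shash_tfm_digest() wraps the init/update/final dance and the on-stack descriptor management that this helper used to open-code, which is all a one-shot hash needs. A minimal standalone sketch of the same pattern:

#include <crypto/hash.h>

static int sha1_digest_once(const u8 *data, unsigned int len, u8 *out)
{
    struct crypto_shash *tfm;
    int ret;

    tfm = crypto_alloc_shash("sha1", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    ret = crypto_shash_tfm_digest(tfm, data, len, out);  /* one call */

    crypto_free_shash(tfm);
    return ret;
}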
drivers/crypto/amcc/crypto4xx_alg.c
@@ -181,13 +181,6 @@ int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
                             CRYPTO_FEEDBACK_MODE_NO_FB);
 }
 
-int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
-                             const u8 *key, unsigned int keylen)
-{
-    return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
-                                CRYPTO_FEEDBACK_MODE_128BIT_CFB);
-}
-
 int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
                              const u8 *key, unsigned int keylen)
 {
@@ -195,13 +188,6 @@ int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
                             CRYPTO_FEEDBACK_MODE_NO_FB);
 }
 
-int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
-                             const u8 *key, unsigned int keylen)
-{
-    return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
-                                CRYPTO_FEEDBACK_MODE_64BIT_OFB);
-}
-
 int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
                              const u8 *key, unsigned int keylen)
 {
drivers/crypto/amcc/crypto4xx_core.c
@@ -1209,26 +1209,6 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
         .init = crypto4xx_sk_init,
         .exit = crypto4xx_sk_exit,
     } },
-    { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
-        .base = {
-            .cra_name = "cfb(aes)",
-            .cra_driver_name = "cfb-aes-ppc4xx",
-            .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
-            .cra_flags = CRYPTO_ALG_ASYNC |
-                CRYPTO_ALG_KERN_DRIVER_ONLY,
-            .cra_blocksize = 1,
-            .cra_ctxsize = sizeof(struct crypto4xx_ctx),
-            .cra_module = THIS_MODULE,
-        },
-        .min_keysize = AES_MIN_KEY_SIZE,
-        .max_keysize = AES_MAX_KEY_SIZE,
-        .ivsize = AES_IV_SIZE,
-        .setkey = crypto4xx_setkey_aes_cfb,
-        .encrypt = crypto4xx_encrypt_iv_stream,
-        .decrypt = crypto4xx_decrypt_iv_stream,
-        .init = crypto4xx_sk_init,
-        .exit = crypto4xx_sk_exit,
-    } },
     { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
         .base = {
             .cra_name = "ctr(aes)",
@@ -1289,26 +1269,6 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
         .init = crypto4xx_sk_init,
         .exit = crypto4xx_sk_exit,
     } },
-    { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
-        .base = {
-            .cra_name = "ofb(aes)",
-            .cra_driver_name = "ofb-aes-ppc4xx",
-            .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
-            .cra_flags = CRYPTO_ALG_ASYNC |
-                CRYPTO_ALG_KERN_DRIVER_ONLY,
-            .cra_blocksize = 1,
-            .cra_ctxsize = sizeof(struct crypto4xx_ctx),
-            .cra_module = THIS_MODULE,
-        },
-        .min_keysize = AES_MIN_KEY_SIZE,
-        .max_keysize = AES_MAX_KEY_SIZE,
-        .ivsize = AES_IV_SIZE,
-        .setkey = crypto4xx_setkey_aes_ofb,
-        .encrypt = crypto4xx_encrypt_iv_stream,
-        .decrypt = crypto4xx_decrypt_iv_stream,
-        .init = crypto4xx_sk_init,
-        .exit = crypto4xx_sk_exit,
-    } },
 
     /* AEAD */
     { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
drivers/crypto/amcc/crypto4xx_core.h
@@ -162,14 +162,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
                        struct scatterlist *dst_tmp);
 int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
                              const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
-                             const u8 *key, unsigned int keylen);
 int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
                              const u8 *key, unsigned int keylen);
 int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
                              const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
-                             const u8 *key, unsigned int keylen);
 int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
                              const u8 *key, unsigned int keylen);
 int crypto4xx_encrypt_ctr(struct skcipher_request *req);
drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -327,8 +327,8 @@ int meson_cipher_init(struct crypto_tfm *tfm)
         return PTR_ERR(op->fallback_tfm);
     }
 
-    sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
-                     crypto_skcipher_reqsize(op->fallback_tfm);
+    crypto_skcipher_set_reqsize(sktfm, sizeof(struct meson_cipher_req_ctx) +
+                                crypto_skcipher_reqsize(op->fallback_tfm));
 
     return 0;
 }
drivers/crypto/aspeed/Kconfig
@@ -38,14 +38,12 @@ config CRYPTO_DEV_ASPEED_HACE_CRYPTO
     select CRYPTO_DES
     select CRYPTO_ECB
     select CRYPTO_CBC
-    select CRYPTO_CFB
-    select CRYPTO_OFB
     select CRYPTO_CTR
     help
       Select here to enable Aspeed Hash & Crypto Engine (HACE)
       crypto driver.
       Supports AES/DES symmetric-key encryption and decryption
-      with ECB/CBC/CFB/OFB/CTR options.
+      with ECB/CBC/CTR options.
 
 config CRYPTO_DEV_ASPEED_ACRY
     bool "Enable Aspeed ACRY RSA Engine"
@ -473,30 +473,6 @@ static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
|
||||
HACE_CMD_TRIPLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
|
||||
HACE_CMD_TRIPLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
|
||||
HACE_CMD_TRIPLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
|
||||
HACE_CMD_TRIPLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
|
||||
HACE_CMD_TRIPLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
|
||||
@ -533,30 +509,6 @@ static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
|
||||
HACE_CMD_SINGLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_des_ofb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
|
||||
HACE_CMD_SINGLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_des_ofb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
|
||||
HACE_CMD_SINGLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_des_cfb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
|
||||
HACE_CMD_SINGLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_des_cfb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
|
||||
HACE_CMD_SINGLE_DES);
|
||||
}
|
||||
|
||||
static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
|
||||
@ -659,26 +611,6 @@ static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
|
||||
return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
|
||||
}
|
||||
|
||||
static int aspeed_aes_ofb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB);
|
||||
}
|
||||
|
||||
static int aspeed_aes_ofb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB);
|
||||
}
|
||||
|
||||
static int aspeed_aes_cfb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB);
|
||||
}
|
||||
|
||||
static int aspeed_aes_cfb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB);
|
||||
}
|
||||
|
||||
static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
|
||||
@ -790,60 +722,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
|
||||
.do_one_request = aspeed_crypto_do_request,
|
||||
},
|
||||
},
|
||||
{
|
||||
.alg.skcipher.base = {
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = aspeed_aes_setkey,
|
||||
.encrypt = aspeed_aes_cfb_encrypt,
|
||||
.decrypt = aspeed_aes_cfb_decrypt,
|
||||
.init = aspeed_crypto_cra_init,
|
||||
.exit = aspeed_crypto_cra_exit,
|
||||
.base = {
|
||||
.cra_name = "cfb(aes)",
|
||||
.cra_driver_name = "aspeed-cfb-aes",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
|
||||
.cra_alignmask = 0x0f,
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
},
|
||||
.alg.skcipher.op = {
|
||||
.do_one_request = aspeed_crypto_do_request,
|
||||
},
|
||||
},
|
||||
{
|
||||
.alg.skcipher.base = {
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = aspeed_aes_setkey,
|
||||
.encrypt = aspeed_aes_ofb_encrypt,
|
||||
.decrypt = aspeed_aes_ofb_decrypt,
|
||||
.init = aspeed_crypto_cra_init,
|
||||
.exit = aspeed_crypto_cra_exit,
|
||||
.base = {
|
||||
.cra_name = "ofb(aes)",
|
||||
.cra_driver_name = "aspeed-ofb-aes",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher.base = {
.min_keysize = DES_KEY_SIZE,
@@ -897,60 +775,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = aspeed_des_setkey,
.encrypt = aspeed_des_cfb_encrypt,
.decrypt = aspeed_des_cfb_decrypt,
.init = aspeed_crypto_cra_init,
.exit = aspeed_crypto_cra_exit,
.base = {
.cra_name = "cfb(des)",
.cra_driver_name = "aspeed-cfb-des",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = aspeed_des_setkey,
.encrypt = aspeed_des_ofb_encrypt,
.decrypt = aspeed_des_ofb_decrypt,
.init = aspeed_crypto_cra_init,
.exit = aspeed_crypto_cra_exit,
.base = {
.cra_name = "ofb(des)",
.cra_driver_name = "aspeed-ofb-des",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher.base = {
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1004,60 +828,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = aspeed_des_setkey,
.encrypt = aspeed_tdes_cfb_encrypt,
.decrypt = aspeed_tdes_cfb_decrypt,
.init = aspeed_crypto_cra_init,
.exit = aspeed_crypto_cra_exit,
.base = {
.cra_name = "cfb(des3_ede)",
.cra_driver_name = "aspeed-cfb-tdes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = aspeed_des_setkey,
.encrypt = aspeed_tdes_ofb_encrypt,
.decrypt = aspeed_tdes_ofb_decrypt,
.init = aspeed_crypto_cra_init,
.exit = aspeed_crypto_cra_exit,
.base = {
.cra_name = "ofb(des3_ede)",
.cra_driver_name = "aspeed-ofb-tdes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
};

static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {

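The hunks above drop the cfb(des), ofb(des), cfb(des3_ede) and ofb(des3_ede) entries from the aspeed alg table; per the pull summary, CFB and OFB are being removed from the API altogether. The visible effect for any remaining caller is an allocation failure. A minimal sketch of that from the crypto API side (hypothetical probe code, not part of this patch; the exact errno is an assumption, -ENOENT is the usual one):

#include <crypto/skcipher.h>
#include <linux/err.h>

/* Sketch: once the generic OFB template and every driver entry are
 * gone, "ofb(des)" no longer resolves and the allocation fails. */
static int try_ofb_des(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("ofb(des)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* expected after this series */

	crypto_free_skcipher(tfm);
	return 0;
}
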
@@ -46,11 +46,6 @@
#define ATMEL_AES_BUFFER_ORDER 2
#define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)

#define CFB8_BLOCK_SIZE 1
#define CFB16_BLOCK_SIZE 2
#define CFB32_BLOCK_SIZE 4
#define CFB64_BLOCK_SIZE 8

#define SIZE_IN_WORDS(x) ((x) >> 2)

/* AES flags */
@@ -60,12 +55,6 @@
#define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC AES_MR_OPMOD_CBC
#define AES_FLAGS_OFB AES_MR_OPMOD_OFB
#define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
#define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
#define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
#define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
@@ -87,7 +76,6 @@

struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -860,22 +848,6 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
int err;

switch (dd->ctx->block_size) {
case CFB8_BLOCK_SIZE:
addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
maxburst = 1;
break;

case CFB16_BLOCK_SIZE:
addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
maxburst = 1;
break;

case CFB32_BLOCK_SIZE:
case CFB64_BLOCK_SIZE:
addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
maxburst = 1;
break;

case AES_BLOCK_SIZE:
addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
maxburst = dd->caps.max_burst_size;
@@ -1103,7 +1075,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
}

/*
* ECB, CBC, CFB, OFB or CTR mode require the plaintext and ciphertext
* ECB, CBC or CTR mode require the plaintext and ciphertext
* to have a positve integer length.
*/
if (!req->cryptlen && opmode != AES_FLAGS_XTS)
@@ -1113,27 +1085,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
return -EINVAL;

switch (mode & AES_FLAGS_OPMODE_MASK) {
case AES_FLAGS_CFB8:
ctx->block_size = CFB8_BLOCK_SIZE;
break;

case AES_FLAGS_CFB16:
ctx->block_size = CFB16_BLOCK_SIZE;
break;

case AES_FLAGS_CFB32:
ctx->block_size = CFB32_BLOCK_SIZE;
break;

case AES_FLAGS_CFB64:
ctx->block_size = CFB64_BLOCK_SIZE;
break;

default:
ctx->block_size = AES_BLOCK_SIZE;
break;
}
ctx->block_size = AES_BLOCK_SIZE;
ctx->is_aead = false;

rctx = skcipher_request_ctx(req);
@@ -1188,66 +1140,6 @@ static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
@@ -1318,76 +1210,6 @@ static struct skcipher_alg aes_algs[] = {
.decrypt = atmel_aes_cbc_decrypt,
.ivsize = AES_BLOCK_SIZE,
},
{
.base.cra_name = "ofb(aes)",
.base.cra_driver_name = "atmel-ofb-aes",
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),

.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_ofb_encrypt,
.decrypt = atmel_aes_ofb_decrypt,
.ivsize = AES_BLOCK_SIZE,
},
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),

.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb_encrypt,
.decrypt = atmel_aes_cfb_decrypt,
.ivsize = AES_BLOCK_SIZE,
},
{
.base.cra_name = "cfb32(aes)",
.base.cra_driver_name = "atmel-cfb32-aes",
.base.cra_blocksize = CFB32_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),

.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb32_encrypt,
.decrypt = atmel_aes_cfb32_decrypt,
.ivsize = AES_BLOCK_SIZE,
},
{
.base.cra_name = "cfb16(aes)",
.base.cra_driver_name = "atmel-cfb16-aes",
.base.cra_blocksize = CFB16_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),

.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb16_encrypt,
.decrypt = atmel_aes_cfb16_decrypt,
.ivsize = AES_BLOCK_SIZE,
},
{
.base.cra_name = "cfb8(aes)",
.base.cra_driver_name = "atmel-cfb8-aes",
.base.cra_blocksize = CFB8_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),

.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb8_encrypt,
.decrypt = atmel_aes_cfb8_decrypt,
.ivsize = AES_BLOCK_SIZE,
},
{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "atmel-ctr-aes",
@@ -1404,21 +1226,6 @@ static struct skcipher_alg aes_algs[] = {
},
};

static struct skcipher_alg aes_cfb64_alg = {
.base.cra_name = "cfb64(aes)",
.base.cra_driver_name = "atmel-cfb64-aes",
.base.cra_blocksize = CFB64_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),

.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = atmel_aes_setkey,
.encrypt = atmel_aes_cfb64_encrypt,
.decrypt = atmel_aes_cfb64_decrypt,
.ivsize = AES_BLOCK_SIZE,
};


/* gcm aead functions */

@@ -2407,9 +2214,6 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);

if (dd->caps.has_cfb64)
crypto_unregister_skcipher(&aes_cfb64_alg);

for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_skcipher(&aes_algs[i]);
}
@@ -2434,14 +2238,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_algs;
}

if (dd->caps.has_cfb64) {
atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);

err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}

if (dd->caps.has_gcm) {
atmel_aes_crypto_alg_init(&aes_gcm_alg.base);

@@ -2482,8 +2278,6 @@ err_aes_authenc_alg:
err_aes_xts_alg:
crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
crypto_unregister_skcipher(&aes_cfb64_alg);
err_aes_cfb64_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
for (j = 0; j < i; j++)
@@ -2495,7 +2289,6 @@ err_aes_algs:
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2507,7 +2300,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x600:
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2515,13 +2307,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
break;
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
case 0x130:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.max_burst_size = 4;
break;
case 0x120:

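With the CFB8/16/32/64 paths gone, atmel_aes_dma_start() no longer needs to vary the DMA bus width by cipher block size: every surviving skcipher mode runs on AES_BLOCK_SIZE. A hedged sketch of the selection logic that remains, written as a standalone helper (the function name is hypothetical; the constants come from the hunk above):

#include <linux/dmaengine.h>
#include <crypto/aes.h>

/* Sketch: after this patch the switch collapses to the one case that
 * survives, so the bus width is always 4 bytes for AES requests. */
static enum dma_slave_buswidth aes_addr_width(unsigned int block_size)
{
	return block_size == AES_BLOCK_SIZE ? DMA_SLAVE_BUSWIDTH_4_BYTES
					    : DMA_SLAVE_BUSWIDTH_UNDEFINED;
}
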
@@ -45,11 +45,6 @@
#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC
#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB
#define TDES_FLAGS_CFB64 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)

#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

@@ -60,13 +55,8 @@

#define ATMEL_TDES_QUEUE_LENGTH 50

#define CFB8_BLOCK_SIZE 1
#define CFB16_BLOCK_SIZE 2
#define CFB32_BLOCK_SIZE 4

struct atmel_tdes_caps {
bool has_dma;
u32 has_cfb_3keys;
};

struct atmel_tdes_dev;
@@ -376,7 +366,6 @@ static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
int len32;

dd->dma_size = length;
@@ -386,19 +375,7 @@ static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
DMA_TO_DEVICE);
}

switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
case TDES_FLAGS_CFB8:
len32 = DIV_ROUND_UP(length, sizeof(u8));
break;

case TDES_FLAGS_CFB16:
len32 = DIV_ROUND_UP(length, sizeof(u16));
break;

default:
len32 = DIV_ROUND_UP(length, sizeof(u32));
break;
}
len32 = DIV_ROUND_UP(length, sizeof(u32));

atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
@@ -419,7 +396,6 @@ static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
enum dma_slave_buswidth addr_width;
@@ -431,19 +407,7 @@ static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
DMA_TO_DEVICE);
}

switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
case TDES_FLAGS_CFB8:
addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;

case TDES_FLAGS_CFB16:
addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;

default:
addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
}
addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
@@ -680,39 +644,11 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
if (!req->cryptlen)
return 0;

switch (mode & TDES_FLAGS_OPMODE_MASK) {
case TDES_FLAGS_CFB8:
if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
break;

case TDES_FLAGS_CFB16:
if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
break;

case TDES_FLAGS_CFB32:
if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
break;

default:
if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
dev_dbg(dev, "request size is not exact amount of DES blocks\n");
return -EINVAL;
}
ctx->block_size = DES_BLOCK_SIZE;
break;
if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
dev_dbg(dev, "request size is not exact amount of DES blocks\n");
return -EINVAL;
}
ctx->block_size = DES_BLOCK_SIZE;

rctx->mode = mode;

@@ -832,55 +768,6 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}

static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}

static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}

static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}

static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}

static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
@@ -931,71 +818,6 @@ static struct skcipher_alg tdes_algs[] = {
.encrypt = atmel_tdes_cbc_encrypt,
.decrypt = atmel_tdes_cbc_decrypt,
},
{
.base.cra_name = "cfb(des)",
.base.cra_driver_name = "atmel-cfb-des",
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_alignmask = 0x7,

.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_des_setkey,
.encrypt = atmel_tdes_cfb_encrypt,
.decrypt = atmel_tdes_cfb_decrypt,
},
{
.base.cra_name = "cfb8(des)",
.base.cra_driver_name = "atmel-cfb8-des",
.base.cra_blocksize = CFB8_BLOCK_SIZE,
.base.cra_alignmask = 0,

.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_des_setkey,
.encrypt = atmel_tdes_cfb8_encrypt,
.decrypt = atmel_tdes_cfb8_decrypt,
},
{
.base.cra_name = "cfb16(des)",
.base.cra_driver_name = "atmel-cfb16-des",
.base.cra_blocksize = CFB16_BLOCK_SIZE,
.base.cra_alignmask = 0x1,

.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_des_setkey,
.encrypt = atmel_tdes_cfb16_encrypt,
.decrypt = atmel_tdes_cfb16_decrypt,
},
{
.base.cra_name = "cfb32(des)",
.base.cra_driver_name = "atmel-cfb32-des",
.base.cra_blocksize = CFB32_BLOCK_SIZE,
.base.cra_alignmask = 0x3,

.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_des_setkey,
.encrypt = atmel_tdes_cfb32_encrypt,
.decrypt = atmel_tdes_cfb32_decrypt,
},
{
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "atmel-ofb-des",
.base.cra_blocksize = 1,
.base.cra_alignmask = 0x7,

.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_des_setkey,
.encrypt = atmel_tdes_ofb_encrypt,
.decrypt = atmel_tdes_ofb_decrypt,
},
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "atmel-ecb-tdes",
@@ -1021,19 +843,6 @@ static struct skcipher_alg tdes_algs[] = {
.decrypt = atmel_tdes_cbc_decrypt,
.ivsize = DES_BLOCK_SIZE,
},
{
.base.cra_name = "ofb(des3_ede)",
.base.cra_driver_name = "atmel-ofb-tdes",
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_alignmask = 0x7,

.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
.encrypt = atmel_tdes_ofb_encrypt,
.decrypt = atmel_tdes_ofb_decrypt,
.ivsize = DES_BLOCK_SIZE,
},
};

static void atmel_tdes_queue_task(unsigned long data)
@@ -1121,14 +930,12 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{

dd->caps.has_dma = 0;
dd->caps.has_cfb_3keys = 0;

/* keep only major version number */
switch (dd->hw_version & 0xf00) {
case 0x800:
case 0x700:
dd->caps.has_dma = 1;
dd->caps.has_cfb_3keys = 1;
break;
case 0x600:
break;

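The surviving guard in atmel_tdes_crypt() is the DES_BLOCK_SIZE alignment check; the CFB8/16/32 special cases above it are gone. IS_ALIGNED() is just a power-of-two remainder test, so the remaining check reduces to the sketch below (helper name is hypothetical):

#include <linux/kernel.h>
#include <crypto/des.h>

/* Sketch: DES_BLOCK_SIZE is 8, so IS_ALIGNED(len, 8) is (len & 7) == 0. */
static bool tdes_req_len_ok(unsigned int cryptlen)
{
	return IS_ALIGNED(cryptlen, DES_BLOCK_SIZE);
}
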
@@ -1535,7 +1535,8 @@ static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
crypto_skcipher_set_reqsize(tfm,
sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

return 0;
@@ -1551,7 +1552,8 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);

tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
crypto_skcipher_set_reqsize(tfm,
sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

return 0;
@@ -1561,7 +1563,8 @@ static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
crypto_skcipher_set_reqsize(tfm,
sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

return 0;
@@ -1571,7 +1574,8 @@ static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
crypto_skcipher_set_reqsize(tfm,
sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

return 0;

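These four hunks, and the sl3516 hunk further down, swap direct writes to tfm->reqsize for crypto_skcipher_set_reqsize(), which keeps the request-context size behind the API's accessor. A sketch of the pattern, assuming a driver-private request context type (both names here are hypothetical):

#include <crypto/internal/skcipher.h>

struct my_req_ctx {		/* hypothetical per-request state */
	u8 iv_backup[16];
};

static int my_skcipher_init(struct crypto_skcipher *tfm)
{
	/* Preferred over "tfm->reqsize = ...": the setter is the
	 * sanctioned way to size skcipher_request private data. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_req_ctx));
	return 0;
}
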
@@ -3514,25 +3514,6 @@ static struct iproc_alg_s driver_algs[] = {
},

/* SKCIPHER algorithms. */
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "ofb-des-iproc",
.base.cra_blocksize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
.mode = CIPHER_MODE_OFB,
},
.auth_info = {
.alg = HASH_ALG_NONE,
.mode = HASH_MODE_NONE,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
@@ -3571,25 +3552,6 @@ static struct iproc_alg_s driver_algs[] = {
.mode = HASH_MODE_NONE,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ofb(des3_ede)",
.base.cra_driver_name = "ofb-des3-iproc",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
.mode = CIPHER_MODE_OFB,
},
.auth_info = {
.alg = HASH_ALG_NONE,
.mode = HASH_MODE_NONE,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
@@ -3628,25 +3590,6 @@ static struct iproc_alg_s driver_algs[] = {
.mode = HASH_MODE_NONE,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ofb(aes)",
.base.cra_driver_name = "ofb-aes-iproc",
.base.cra_blocksize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
.mode = CIPHER_MODE_OFB,
},
.auth_info = {
.alg = HASH_ALG_NONE,
.mode = HASH_MODE_NONE,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {

@@ -311,12 +311,6 @@ static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
return cvm_setkey(cipher, key, keylen, AES_ECB);
}

static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CFB);
}

static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
@@ -391,24 +385,6 @@ static struct skcipher_alg algs[] = { {
.encrypt = cvm_encrypt,
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "cavium-cfb-aes",
.base.cra_module = THIS_MODULE,

.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = cvm_cfb_aes_setkey,
.encrypt = cvm_encrypt,
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,

@@ -419,25 +419,6 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.decrypt = nitrox_aes_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
.cra_name = "cfb(aes)",
.cra_driver_name = "n5_cfb(aes)",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_setkey,
.encrypt = nitrox_aes_encrypt,
.decrypt = nitrox_aes_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
.cra_name = "xts(aes)",

@@ -266,24 +266,6 @@ static struct ccp_aes_def aes_algs[] = {
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
{
.mode = CCP_AES_MODE_CFB,
.version = CCP_VERSION(3, 0),
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
.blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
{
.mode = CCP_AES_MODE_OFB,
.version = CCP_VERSION(3, 0),
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccp",
.blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
{
.mode = CCP_AES_MODE_CTR,
.version = CCP_VERSION(3, 0),

@@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,

wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir);
if (dma_mapping_error(wa->dev, wa->dma.address))
if (dma_mapping_error(wa->dev, wa->dma.address)) {
kfree(wa->address);
wa->address = NULL;
return -ENOMEM;
}

wa->dma.length = len;
}

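This ccp hunk fixes a leak: when dma_map_single() failed, the workarea buffer was left allocated behind a stale pointer. The general shape of the fix, sketched outside the driver (the helper name and double-pointer interface are assumptions, not the driver's code):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Sketch: free and NULL the CPU buffer when the DMA mapping fails, so
 * the caller can neither kfree() it again nor reuse a half-set-up area. */
static int map_workarea(struct device *dev, void **buf, size_t len,
			dma_addr_t *handle, enum dma_data_direction dir)
{
	*handle = dma_map_single(dev, *buf, len, dir);
	if (dma_mapping_error(dev, *handle)) {
		kfree(*buf);
		*buf = NULL;
		return -ENOMEM;
	}
	return 0;
}
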
@@ -2569,9 +2569,13 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,

alg = &tmpl->template_aead;

snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
tmpl->driver_name);
if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
tmpl->name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
tmpl->driver_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);

alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CC_CRA_PRIO;

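Both ccree hunks (this one for AEADs, the later one for skciphers) now propagate snprintf() truncation instead of silently registering a clipped algorithm name. snprintf() returns the length it would have written, so a return value >= the buffer size means the output was truncated. A standalone sketch of the check (NAME_MAX_LEN and the helper are stand-ins):

#include <linux/kernel.h>

#define NAME_MAX_LEN 128	/* stand-in for CRYPTO_MAX_ALG_NAME */

/* Sketch: copy a template name, failing instead of truncating. */
static int copy_alg_name(char *dst, const char *src)
{
	if (snprintf(dst, NAME_MAX_LEN, "%s", src) >= NAME_MAX_LEN)
		return -EINVAL;
	return 0;
}
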
@@ -1079,24 +1079,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
.sec_func = true,
},
{
.name = "ofb(paes)",
.driver_name = "ofb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
.decrypt = cc_cipher_decrypt,
.min_keysize = CC_HW_KEY_SIZE,
.max_keysize = CC_HW_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
.sec_func = true,
},
{
.name = "cts(cbc(paes))",
.driver_name = "cts-cbc-paes-ccree",
@@ -1205,23 +1187,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
},
{
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccree",
.blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
.decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
},
{
.name = "cts(cbc(aes))",
.driver_name = "cts-cbc-aes-ccree",
@@ -1427,9 +1392,13 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,

memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));

snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
tmpl->driver_name);
if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
tmpl->name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
tmpl->driver_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);

alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CC_CRA_PRIO;
alg->base.cra_blocksize = tmpl->blocksize;

@@ -332,8 +332,8 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}

sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
crypto_skcipher_set_reqsize(sktfm, sizeof(struct sl3516_ce_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));

dev_info(op->ce->dev, "Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&sktfm->base),

@@ -2096,16 +2096,6 @@ static inline int hifn_encrypt_aes_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_aes_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_aes_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}

/*
* AES decryption functions.
@@ -2120,16 +2110,6 @@ static inline int hifn_decrypt_aes_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_aes_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_aes_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}

/*
* DES ecryption functions.
@@ -2144,16 +2124,6 @@ static inline int hifn_encrypt_des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}

/*
* DES decryption functions.
@@ -2168,16 +2138,6 @@ static inline int hifn_decrypt_des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}

/*
* 3DES ecryption functions.
@@ -2192,16 +2152,6 @@ static inline int hifn_encrypt_3des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_3des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_3des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}

/* 3DES decryption functions. */
static inline int hifn_decrypt_3des_ecb(struct skcipher_request *req)
@@ -2214,16 +2164,6 @@ static inline int hifn_decrypt_3des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_3des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_3des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}

struct hifn_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
@@ -2234,28 +2174,8 @@ struct hifn_alg_template {

static const struct hifn_alg_template hifn_alg_templates[] = {
/*
* 3DES ECB, CBC, CFB and OFB modes.
* 3DES ECB and CBC modes.
*/
{
.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
.skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cfb,
.decrypt = hifn_decrypt_3des_cfb,
},
},
{
.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
.skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ofb,
.decrypt = hifn_decrypt_3des_ofb,
},
},
{
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
.skcipher = {
@@ -2279,28 +2199,8 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
},

/*
* DES ECB, CBC, CFB and OFB modes.
* DES ECB and CBC modes.
*/
{
.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
.skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_des_cfb,
.decrypt = hifn_decrypt_des_cfb,
},
},
{
.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
.skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_des_ofb,
.decrypt = hifn_decrypt_des_ofb,
},
},
{
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
.skcipher = {
@@ -2324,7 +2224,7 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
},

/*
* AES ECB, CBC, CFB and OFB modes.
* AES ECB and CBC modes.
*/
{
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
@@ -2347,26 +2247,6 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
.decrypt = hifn_decrypt_aes_cbc,
},
},
{
.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
.skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_aes_cfb,
.decrypt = hifn_decrypt_aes_cfb,
},
},
{
.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
.skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_aes_ofb,
.decrypt = hifn_decrypt_aes_ofb,
},
},
};

static int hifn_init_tfm(struct crypto_skcipher *tfm)

@@ -31,6 +31,10 @@ static const char * const qm_debug_file_name[] = {
[CLEAR_ENABLE] = "clear_enable",
};

static const char * const qm_s[] = {
"work", "stop",
};

struct qm_dfx_item {
const char *name;
u32 offset;
@@ -53,34 +57,34 @@ static struct qm_dfx_item qm_dfx_files[] = {
#define CNT_CYC_REGS_NUM 10
static const struct debugfs_reg32 qm_dfx_regs[] = {
/* XXX_CNT are reading clear register */
{"QM_ECC_1BIT_CNT               ", 0x104000ull},
{"QM_ECC_MBIT_CNT               ", 0x104008ull},
{"QM_DFX_MB_CNT                 ", 0x104018ull},
{"QM_DFX_DB_CNT                 ", 0x104028ull},
{"QM_DFX_SQE_CNT                ", 0x104038ull},
{"QM_DFX_CQE_CNT                ", 0x104048ull},
{"QM_DFX_SEND_SQE_TO_ACC_CNT    ", 0x104050ull},
{"QM_DFX_WB_SQE_FROM_ACC_CNT    ", 0x104058ull},
{"QM_DFX_ACC_FINISH_CNT         ", 0x104060ull},
{"QM_DFX_CQE_ERR_CNT            ", 0x1040b4ull},
{"QM_DFX_FUNS_ACTIVE_ST         ", 0x200ull},
{"QM_ECC_1BIT_INF               ", 0x104004ull},
{"QM_ECC_MBIT_INF               ", 0x10400cull},
{"QM_DFX_ACC_RDY_VLD0           ", 0x1040a0ull},
{"QM_DFX_ACC_RDY_VLD1           ", 0x1040a4ull},
{"QM_DFX_AXI_RDY_VLD            ", 0x1040a8ull},
{"QM_DFX_FF_ST0                 ", 0x1040c8ull},
{"QM_DFX_FF_ST1                 ", 0x1040ccull},
{"QM_DFX_FF_ST2                 ", 0x1040d0ull},
{"QM_DFX_FF_ST3                 ", 0x1040d4ull},
{"QM_DFX_FF_ST4                 ", 0x1040d8ull},
{"QM_DFX_FF_ST5                 ", 0x1040dcull},
{"QM_DFX_FF_ST6                 ", 0x1040e0ull},
{"QM_IN_IDLE_ST                 ", 0x1040e4ull},
{"QM_ECC_1BIT_CNT               ", 0x104000},
{"QM_ECC_MBIT_CNT               ", 0x104008},
{"QM_DFX_MB_CNT                 ", 0x104018},
{"QM_DFX_DB_CNT                 ", 0x104028},
{"QM_DFX_SQE_CNT                ", 0x104038},
{"QM_DFX_CQE_CNT                ", 0x104048},
{"QM_DFX_SEND_SQE_TO_ACC_CNT    ", 0x104050},
{"QM_DFX_WB_SQE_FROM_ACC_CNT    ", 0x104058},
{"QM_DFX_ACC_FINISH_CNT         ", 0x104060},
{"QM_DFX_CQE_ERR_CNT            ", 0x1040b4},
{"QM_DFX_FUNS_ACTIVE_ST         ", 0x200},
{"QM_ECC_1BIT_INF               ", 0x104004},
{"QM_ECC_MBIT_INF               ", 0x10400c},
{"QM_DFX_ACC_RDY_VLD0           ", 0x1040a0},
{"QM_DFX_ACC_RDY_VLD1           ", 0x1040a4},
{"QM_DFX_AXI_RDY_VLD            ", 0x1040a8},
{"QM_DFX_FF_ST0                 ", 0x1040c8},
{"QM_DFX_FF_ST1                 ", 0x1040cc},
{"QM_DFX_FF_ST2                 ", 0x1040d0},
{"QM_DFX_FF_ST3                 ", 0x1040d4},
{"QM_DFX_FF_ST4                 ", 0x1040d8},
{"QM_DFX_FF_ST5                 ", 0x1040dc},
{"QM_DFX_FF_ST6                 ", 0x1040e0},
{"QM_IN_IDLE_ST                 ", 0x1040e4},
};

static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
{"QM_DFX_FUNS_ACTIVE_ST         ", 0x200ull},
{"QM_DFX_FUNS_ACTIVE_ST         ", 0x200},
};

/* define the QM's dfx regs region and region length */

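The hunk above drops the ull suffixes from the register offsets: debugfs_reg32.offset is an unsigned long, so forcing 64-bit literals bought nothing. For context, a hedged sketch of how such a table is typically exposed (the debugfs_create_regset32() wiring is not part of this hunk, and all names here are made up):

#include <linux/debugfs.h>

static const struct debugfs_reg32 demo_regs[] = {
	{"DEMO_STATUS ", 0x200},	/* plain offsets are enough */
};

/* Sketch: expose a reg32 table as a debugfs file under 'parent'. */
static void demo_regs_init(struct dentry *parent, void __iomem *base)
{
	static struct debugfs_regset32 regset;

	regset.regs = demo_regs;
	regset.nregs = ARRAY_SIZE(demo_regs);
	regset.base = base;
	debugfs_create_regset32("regs", 0444, parent, &regset);
}
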
@@ -118,8 +118,6 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43

#define HPRE_DEV_ALG_MAX_LEN 256

static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -135,12 +133,7 @@ struct hpre_hw_error {
const char *msg;
};

struct hpre_dev_alg {
u32 alg_msk;
const char *alg;
};

static const struct hpre_dev_alg hpre_dev_algs[] = {
static const struct qm_dev_alg hpre_dev_algs[] = {
{
.alg_msk = BIT(0),
.alg = "rsa\n"
@@ -233,6 +226,20 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};

enum hpre_pre_store_cap_idx {
HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
HPRE_DRV_ALG_BITMAP_CAP_IDX,
HPRE_DEV_ALG_BITMAP_CAP_IDX,
};

static const u32 hpre_pre_store_caps[] = {
HPRE_CLUSTER_NUM_CAP,
HPRE_CORE_ENABLE_BITMAP_CAP,
HPRE_DRV_ALG_BITMAP_CAP,
HPRE_DEV_ALG_BITMAP_CAP,
};

static const struct hpre_hw_error hpre_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -355,42 +362,13 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;

cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
if (alg & cap_val)
return true;

return false;
}

static int hpre_set_qm_algs(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
char *algs, *ptr;
u32 alg_msk;
int i;

if (!qm->use_sva)
return 0;

algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
if (!algs)
return -ENOMEM;

alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);

for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
if (alg_msk & hpre_dev_algs[i].alg_msk)
strcat(algs, hpre_dev_algs[i].alg);

ptr = strrchr(algs, '\n');
if (ptr)
*ptr = '\0';

qm->uacce->algs = algs;

return 0;
}

static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -460,16 +438,6 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

static inline int hpre_cluster_num(struct hisi_qm *qm)
{
return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
}

static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
{
return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
}

struct hisi_qp *hpre_create_qp(u8 type)
{
int node = cpu_to_node(smp_processor_id());
@@ -536,13 +504,15 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)

static int hpre_set_cluster(struct hisi_qm *qm)
{
u32 cluster_core_mask = hpre_cluster_core_mask(qm);
u8 clusters_num = hpre_cluster_num(qm);
struct device *dev = &qm->pdev->dev;
unsigned long offset;
u32 cluster_core_mask;
u8 clusters_num;
u32 val = 0;
int ret, i;

cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
for (i = 0; i < clusters_num; i++) {
offset = i * HPRE_CLSTR_ADDR_INTRVL;

@@ -737,11 +707,12 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
u8 clusters_num = hpre_cluster_num(qm);
unsigned long offset;
u8 clusters_num;
int i;

/* clear clusterX/cluster_ctrl */
clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
@@ -1028,13 +999,14 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
u8 clusters_num = hpre_cluster_num(qm);
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
u8 clusters_num;
int i, ret;

clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
@@ -1139,8 +1111,37 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
debugfs_remove_recursive(qm->debug.debug_root);
}

static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *hpre_cap;
struct device *dev = &qm->pdev->dev;
size_t i, size;

size = ARRAY_SIZE(hpre_pre_store_caps);
hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;

for (i = 0; i < size; i++) {
hpre_cap[i].type = hpre_pre_store_caps[i];
hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
hpre_pre_store_caps[i], qm->cap_ver);
}

if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
return -EINVAL;
}

qm->cap_tables.dev_cap_table = hpre_cap;

return 0;
}

static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
int ret;

if (pdev->revision == QM_HW_V1) {
@@ -1171,7 +1172,16 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}

ret = hpre_set_qm_algs(qm);
/* Fetch and save the value of capability registers */
ret = hpre_pre_store_cap_reg(qm);
if (ret) {
pci_err(pdev, "Failed to pre-store capability registers!\n");
hisi_qm_uninit(qm);
return ret;
}

alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
if (ret) {
pci_err(pdev, "Failed to set hpre algs!\n");
hisi_qm_uninit(qm);
@@ -1184,11 +1194,12 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
u8 clusters_num = hpre_cluster_num(qm);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
u8 clusters_num;
int i, j, idx;

clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
@@ -1225,10 +1236,10 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
u8 clusters_num = hpre_cluster_num(qm);
struct qm_debug *debug = &qm->debug;
struct pci_dev *pdev = qm->pdev;
void __iomem *io_base;
u8 clusters_num;
int i, j, idx;
u32 val;

@@ -1243,6 +1254,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
hpre_com_dfx_regs[i].name, debug->last_words[i], val);
}

clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
for (i = 0; i < clusters_num; i++) {
io_base = qm->io_base + hpre_cluster_offsets[i];
for (j = 0; j < cluster_dfx_regs_num; j++) {

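The hpre hunks replace per-call hisi_qm_get_hw_info() reads with a table filled once at probe (hpre_pre_store_cap_reg() above) and indexed by enum afterwards, so hot paths never touch the capability MMIO again. The pattern in isolation, with hypothetical names throughout:

/* Sketch: read each capability register once, cache the value, and
 * let later lookups index the cache instead of re-reading hardware. */
struct cap_record {
	u32 type;
	u32 cap_val;
};

enum { CAP_CLUSTER_NUM, CAP_CORE_BITMAP, CAP_NR };

static struct cap_record cap_cache[CAP_NR];

static u32 read_cap_hw(u32 type);	/* assumed MMIO accessor */

static void caps_prestore(void)
{
	int i;

	for (i = 0; i < CAP_NR; i++) {
		cap_cache[i].type = i;
		cap_cache[i].cap_val = read_cap_hw(i);
	}
}
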
@ -129,16 +129,21 @@
|
||||
#define QM_FIFO_OVERFLOW_TYPE 0xc0
|
||||
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
|
||||
#define QM_FIFO_OVERFLOW_VF 0x3f
|
||||
#define QM_FIFO_OVERFLOW_QP_SHIFT 16
|
||||
#define QM_ABNORMAL_INF01 0x100014
|
||||
#define QM_DB_TIMEOUT_TYPE 0xc0
|
||||
#define QM_DB_TIMEOUT_TYPE_SHIFT 6
|
||||
#define QM_DB_TIMEOUT_VF 0x3f
|
||||
#define QM_DB_TIMEOUT_QP_SHIFT 16
|
||||
#define QM_ABNORMAL_INF02 0x100018
|
||||
#define QM_AXI_POISON_ERR BIT(22)
|
||||
#define QM_RAS_CE_ENABLE 0x1000ec
|
||||
#define QM_RAS_FE_ENABLE 0x1000f0
|
||||
#define QM_RAS_NFE_ENABLE 0x1000f4
|
||||
#define QM_RAS_CE_THRESHOLD 0x1000f8
|
||||
#define QM_RAS_CE_TIMES_PER_IRQ 1
|
||||
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
|
||||
#define QM_AXI_RRESP_ERR BIT(0)
|
||||
#define QM_ECC_MBIT BIT(2)
|
||||
#define QM_DB_TIMEOUT BIT(10)
|
||||
#define QM_OF_FIFO_OF BIT(11)
|
||||
@ -229,6 +234,8 @@
|
||||
#define QM_QOS_MAX_CIR_U 6
|
||||
#define QM_AUTOSUSPEND_DELAY 3000
|
||||
|
||||
#define QM_DEV_ALG_MAX_LEN 256
|
||||
|
||||
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
|
||||
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
|
||||
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
|
||||
@ -294,6 +301,13 @@ enum qm_basic_type {
|
||||
QM_VF_IRQ_NUM_CAP,
|
||||
};
|
||||
|
||||
enum qm_pre_store_cap_idx {
|
||||
QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
|
||||
QM_AEQ_IRQ_TYPE_CAP_IDX,
|
||||
QM_ABN_IRQ_TYPE_CAP_IDX,
|
||||
QM_PF2VF_IRQ_TYPE_CAP_IDX,
|
||||
};
|
||||
|
||||
static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
|
||||
{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
|
||||
{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
|
||||
@ -323,6 +337,13 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
|
||||
{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
|
||||
};
|
||||
|
||||
static const u32 qm_pre_store_caps[] = {
|
||||
QM_EQ_IRQ_TYPE_CAP,
|
||||
QM_AEQ_IRQ_TYPE_CAP,
|
||||
QM_ABN_IRQ_TYPE_CAP,
|
||||
QM_PF2VF_IRQ_TYPE_CAP,
|
||||
};
|
||||
|
||||
struct qm_mailbox {
|
||||
__le16 w0;
|
||||
__le16 queue_num;
|
||||
@ -386,7 +407,6 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
|
||||
{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
|
||||
{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
|
||||
{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
||||
static const char * const qm_db_timeout[] = {
|
||||
@ -397,10 +417,6 @@ static const char * const qm_fifo_overflow[] = {
|
||||
"cq", "eq", "aeq",
|
||||
};
|
||||
|
||||
static const char * const qp_s[] = {
|
||||
"none", "init", "start", "stop", "close",
|
||||
};
|
||||
|
||||
struct qm_typical_qos_table {
|
||||
u32 start;
|
||||
u32 end;
|
||||
@ -428,85 +444,6 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
|
||||
|
||||
static void qm_irqs_unregister(struct hisi_qm *qm);
|
||||
|
||||
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
|
||||
{
|
||||
enum qm_state curr = atomic_read(&qm->status.flags);
|
||||
bool avail = false;
|
||||
|
||||
switch (curr) {
|
||||
case QM_INIT:
|
||||
if (new == QM_START || new == QM_CLOSE)
|
||||
avail = true;
|
||||
break;
|
||||
case QM_START:
|
||||
if (new == QM_STOP)
|
||||
avail = true;
|
||||
break;
|
||||
case QM_STOP:
|
||||
if (new == QM_CLOSE || new == QM_START)
|
||||
avail = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
|
||||
qm_s[curr], qm_s[new]);
|
||||
|
||||
if (!avail)
|
||||
dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
|
||||
qm_s[curr], qm_s[new]);
|
||||
|
||||
return avail;
|
||||
}
|
||||
|
||||
static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
|
||||
enum qp_state new)
|
||||
{
|
||||
enum qm_state qm_curr = atomic_read(&qm->status.flags);
|
||||
enum qp_state qp_curr = 0;
|
||||
bool avail = false;
|
||||
|
||||
if (qp)
|
||||
qp_curr = atomic_read(&qp->qp_status.flags);
|
||||
|
||||
switch (new) {
|
||||
case QP_INIT:
|
||||
if (qm_curr == QM_START || qm_curr == QM_INIT)
|
||||
avail = true;
|
||||
break;
|
||||
case QP_START:
|
||||
if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
|
||||
(qm_curr == QM_START && qp_curr == QP_STOP))
|
||||
avail = true;
|
||||
break;
|
||||
case QP_STOP:
|
||||
if ((qm_curr == QM_START && qp_curr == QP_START) ||
|
||||
(qp_curr == QP_INIT))
|
||||
avail = true;
|
||||
break;
|
||||
case QP_CLOSE:
|
||||
if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
|
||||
(qm_curr == QM_START && qp_curr == QP_STOP) ||
|
||||
(qm_curr == QM_STOP && qp_curr == QP_STOP) ||
|
||||
(qm_curr == QM_STOP && qp_curr == QP_INIT))
|
||||
avail = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
|
||||
qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
|
||||
|
||||
if (!avail)
|
||||
dev_warn(&qm->pdev->dev,
|
||||
"Can not change qp state from %s to %s in QM %s\n",
|
||||
qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
|
||||
|
||||
return avail;
|
||||
}
|
||||
|
||||
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
|
||||
{
|
||||
return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
|
||||
@ -660,9 +597,6 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
|
||||
struct qm_mailbox mailbox;
|
||||
int ret;
|
||||
|
||||
dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
|
||||
queue, cmd, (unsigned long long)dma_addr);
|
||||
|
||||
qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
|
||||
|
||||
mutex_lock(&qm->mailbox_lock);
|
||||
@ -828,6 +762,40 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
        *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}

int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
                     u32 dev_algs_size)
{
        struct device *dev = &qm->pdev->dev;
        char *algs, *ptr;
        int i;

        if (!qm->uacce)
                return 0;

        if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
                dev_err(dev, "algs size %u is equal or larger than %d.\n",
                        dev_algs_size, QM_DEV_ALG_MAX_LEN);
                return -EINVAL;
        }

        algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
        if (!algs)
                return -ENOMEM;

        for (i = 0; i < dev_algs_size; i++)
                if (alg_msk & dev_algs[i].alg_msk)
                        strcat(algs, dev_algs[i].alg);

        ptr = strrchr(algs, '\n');
        if (ptr) {
                *ptr = '\0';
                qm->uacce->algs = algs;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_set_algs);

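hisi_qm_set_algs() concatenates the names of all algorithm groups whose bit is set in alg_msk and publishes them through uacce. A minimal sketch of a caller, modeled on the sec driver changes later in this diff; MY_CIPHER_BITMAP, my_dev_algs and my_qm_setup_algs are hypothetical names, not part of the kernel:

#include <linux/bits.h>
#include <linux/hisi_acc_qm.h>

#define MY_CIPHER_BITMAP GENMASK_ULL(5, 0) /* illustrative bit range, assumed */

static const struct qm_dev_alg my_dev_algs[] = {
        {
                .alg_msk = MY_CIPHER_BITMAP,
                .alg     = "cipher\n",  /* entries end in '\n'; the last one is trimmed */
        },
};

static int my_qm_setup_algs(struct hisi_qm *qm, u64 alg_msk)
{
        /* writes the matching names into qm->uacce->algs, if uacce is set */
        return hisi_qm_set_algs(qm, alg_msk, my_dev_algs, ARRAY_SIZE(my_dev_algs));
}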
static u32 qm_get_irq_num(struct hisi_qm *qm)
{
        if (qm->fun_type == QM_HW_PF)
@ -1406,7 +1374,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
        const struct hisi_qm_hw_error *err;
        struct device *dev = &qm->pdev->dev;
        u32 reg_val, type, vf_num;
        u32 reg_val, type, vf_num, qp_id;
        int i;

        for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
@ -1422,19 +1390,24 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
                        type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
                               QM_DB_TIMEOUT_TYPE_SHIFT;
                        vf_num = reg_val & QM_DB_TIMEOUT_VF;
                        dev_err(dev, "qm %s doorbell timeout in function %u\n",
                                qm_db_timeout[type], vf_num);
                        qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;
                        dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
                                qm_db_timeout[type], vf_num, qp_id);
                } else if (err->int_msk & QM_OF_FIFO_OF) {
                        reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
                        type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
                               QM_FIFO_OVERFLOW_TYPE_SHIFT;
                        vf_num = reg_val & QM_FIFO_OVERFLOW_VF;

                        qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT;
                        if (type < ARRAY_SIZE(qm_fifo_overflow))
                                dev_err(dev, "qm %s fifo overflow in function %u\n",
                                        qm_fifo_overflow[type], vf_num);
                                dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
                                        qm_fifo_overflow[type], vf_num, qp_id);
                        else
                                dev_err(dev, "unknown error type\n");
                } else if (err->int_msk & QM_AXI_RRESP_ERR) {
                        reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
                        if (reg_val & QM_AXI_POISON_ERR)
                                dev_err(dev, "qm axi poison error happened\n");
                }
        }
}
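The reworked qm_log_hw_error() above now also decodes a queue pair id out of the abnormal-status word. A self-contained sketch of this kind of field decoding in plain C; the mask and shift values here are assumed for illustration and do not match the real QM_DB_TIMEOUT_* definitions:

#include <stdint.h>
#include <stdio.h>

#define DB_TIMEOUT_VF_MASK    0xFFFu    /* bits [11:0]: function number (assumed) */
#define DB_TIMEOUT_TYPE_MASK  0xF000u   /* bits [15:12]: timeout type (assumed)   */
#define DB_TIMEOUT_TYPE_SHIFT 12
#define DB_TIMEOUT_QP_SHIFT   16        /* bits [31:16]: queue pair id (assumed)  */

int main(void)
{
        uint32_t reg_val = 0x00230005;  /* sample status word */
        uint32_t vf   = reg_val & DB_TIMEOUT_VF_MASK;                               /* 5    */
        uint32_t type = (reg_val & DB_TIMEOUT_TYPE_MASK) >> DB_TIMEOUT_TYPE_SHIFT;  /* 0    */
        uint32_t qp   = reg_val >> DB_TIMEOUT_QP_SHIFT;                             /* 0x23 */

        printf("vf=%u type=%u qp=0x%x\n", vf, type, qp);
        return 0;
}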
@ -1843,8 +1816,10 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
        struct hisi_qp *qp;
        int qp_id;

        if (!qm_qp_avail_state(qm, NULL, QP_INIT))
        if (atomic_read(&qm->status.flags) == QM_STOP) {
                dev_info_ratelimited(dev, "failed to create qp as qm is stop!\n");
                return ERR_PTR(-EPERM);
        }

        if (qm->qp_in_used == qm->qp_num) {
                dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
@ -1871,7 +1846,6 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
        qp->alg_type = alg_type;
        qp->is_in_kernel = true;
        qm->qp_in_used++;
        atomic_set(&qp->qp_status.flags, QP_INIT);

        return qp;
}
@ -1914,11 +1888,6 @@ static void hisi_qm_release_qp(struct hisi_qp *qp)

        down_write(&qm->qps_lock);

        if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
                up_write(&qm->qps_lock);
                return;
        }

        qm->qp_in_used--;
        idr_remove(&qm->qp_idr, qp->qp_id);

@ -1966,6 +1935,11 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
                cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
                cqc.w8 = 0; /* rand_qc */
        }
        /*
         * Enable request finishing interrupts defaultly.
         * So, there will be some interrupts until disabling
         * this.
         */
        cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
        cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
        cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
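The cqc.dw6 line above seeds the completion queue context with its phase flag set to 1. For readers unfamiliar with phase bits, here is a generic, self-contained sketch of phase-bit completion polling in plain C; it illustrates the technique only and is not the driver's actual CQE layout:

#include <stdbool.h>
#include <stdint.h>

/*
 * A completion entry is "new" when its phase bit matches the consumer's
 * expected phase; the expected phase flips each time the ring wraps.
 */
struct fake_cqe { uint32_t dw; };
#define CQE_PHASE_BIT 0x1u

static bool cqe_ready(const struct fake_cqe *cqe, bool expect_phase)
{
        return (cqe->dw & CQE_PHASE_BIT) == (expect_phase ? CQE_PHASE_BIT : 0);
}

static void consume(struct fake_cqe *ring, int depth)
{
        int head = 0;
        bool phase = true;      /* matches the initial 1 << QM_CQ_PHASE_SHIFT above */

        while (cqe_ready(&ring[head], phase)) {
                /* ... handle ring[head] ... */
                if (++head == depth) {
                        head = 0;
                        phase = !phase; /* ring wrapped: expected phase flips */
                }
        }
}

int main(void)
{
        struct fake_cqe ring[4] = { { .dw = CQE_PHASE_BIT } };

        consume(ring, 4);       /* consumes ring[0] only: the rest still have phase 0 */
        return 0;
}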
@ -1998,8 +1972,10 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
        u32 pasid = arg;
        int ret;

        if (!qm_qp_avail_state(qm, qp, QP_START))
        if (atomic_read(&qm->status.flags) == QM_STOP) {
                dev_info_ratelimited(dev, "failed to start qp as qm is stop!\n");
                return -EPERM;
        }

        ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
        if (ret)
@ -2121,21 +2097,17 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
         * is_resetting flag should be set negative so that this qp will not
         * be restarted after reset.
         */
        if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
        if (atomic_read(&qp->qp_status.flags) != QP_START) {
                qp->is_resetting = false;
                return 0;
        }

        if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
                return -EPERM;

        atomic_set(&qp->qp_status.flags, QP_STOP);

        ret = qm_drain_qp(qp);
        if (ret)
                dev_err(dev, "Failed to drain out data for stopping!\n");

        flush_workqueue(qp->qm->wq);
        if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
                qp_stop_fail_cb(qp);
@ -2855,13 +2827,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
{
        qm_cmd_uninit(qm);
        hisi_qm_unint_work(qm);

        down_write(&qm->qps_lock);

        if (!qm_avail_state(qm, QM_CLOSE)) {
                up_write(&qm->qps_lock);
                return;
        }

        hisi_qm_memory_uninit(qm);
        hisi_qm_set_state(qm, QM_NOT_READY);
        up_write(&qm->qps_lock);
@ -3035,11 +3002,6 @@ int hisi_qm_start(struct hisi_qm *qm)

        down_write(&qm->qps_lock);

        if (!qm_avail_state(qm, QM_START)) {
                up_write(&qm->qps_lock);
                return -EPERM;
        }

        dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);

        if (!qm->qp_num) {
@ -3049,10 +3011,12 @@ int hisi_qm_start(struct hisi_qm *qm)
        }

        ret = __hisi_qm_start(qm);
        if (!ret)
                atomic_set(&qm->status.flags, QM_START);
        if (ret)
                goto err_unlock;

        atomic_set(&qm->status.flags, QM_WORK);
        hisi_qm_set_state(qm, QM_READY);

err_unlock:
        up_write(&qm->qps_lock);
        return ret;
@ -3149,10 +3113,11 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
        down_write(&qm->qps_lock);

        qm->status.stop_reason = r;
        if (!qm_avail_state(qm, QM_STOP)) {
                ret = -EPERM;
        if (atomic_read(&qm->status.flags) == QM_STOP)
                goto err_unlock;
        }

        /* Stop all the request sending at first. */
        atomic_set(&qm->status.flags, QM_STOP);

        if (qm->status.stop_reason == QM_SOFT_RESET ||
            qm->status.stop_reason == QM_DOWN) {
@ -3176,7 +3141,6 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
        }

        qm_clear_queues(qm);
        atomic_set(&qm->status.flags, QM_STOP);

err_unlock:
        up_write(&qm->qps_lock);
@ -3966,6 +3930,11 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
        int pos;
        int i;

        /*
         * Since function qm_set_vf_mse is called only after SRIOV is enabled,
         * pci_find_ext_capability cannot return 0, pos does not need to be
         * checked.
         */
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
        if (set)
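The hunk above only documents why pos is not checked. For context, the usual shape of the VF Memory Space Enable toggle looks like this; PCI_SRIOV_CTRL and PCI_SRIOV_CTRL_MSE are standard defines from pci_regs.h, while toggle_vf_mse is a hypothetical stand-in for the rest of qm_set_vf_mse(), which this diff does not show:

#include <linux/pci.h>

/* Hedged sketch of a VF MSE toggle; mirrors the read/modify pattern above. */
static void toggle_vf_mse(struct pci_dev *pdev, bool set)
{
        u16 sriov_ctrl;
        int pos;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
        if (set)
                sriov_ctrl |= PCI_SRIOV_CTRL_MSE;       /* let VFs decode memory space */
        else
                sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
        pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
}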
@ -4816,7 +4785,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
        if (qm->fun_type == QM_HW_VF)
                return;

        val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
        val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
        if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
                return;

@ -4833,7 +4802,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
        if (qm->fun_type == QM_HW_VF)
                return 0;

        val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
        val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
        if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
                return 0;

@ -4850,7 +4819,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
        struct pci_dev *pdev = qm->pdev;
        u32 irq_vector, val;

        val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
        val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
        if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
                return;

@ -4864,7 +4833,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
        u32 irq_vector, val;
        int ret;

        val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
        val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
        if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
                return 0;

@ -4881,7 +4850,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
        struct pci_dev *pdev = qm->pdev;
        u32 irq_vector, val;

        val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
        val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
        if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
                return;

@ -4895,7 +4864,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
        u32 irq_vector, val;
        int ret;

        val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
        val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
        if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
                return 0;

@ -4913,7 +4882,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
        struct pci_dev *pdev = qm->pdev;
        u32 irq_vector, val;

        val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
        val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
        if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
                return;

@ -4927,7 +4896,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
        u32 irq_vector, val;
        int ret;

        val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
        val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
        if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
                return 0;

@ -5015,7 +4984,29 @@ static int qm_get_qp_num(struct hisi_qm *qm)
        return 0;
}

static void qm_get_hw_caps(struct hisi_qm *qm)
static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
{
        struct hisi_qm_cap_record *qm_cap;
        struct pci_dev *pdev = qm->pdev;
        size_t i, size;

        size = ARRAY_SIZE(qm_pre_store_caps);
        qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
        if (!qm_cap)
                return -ENOMEM;

        for (i = 0; i < size; i++) {
                qm_cap[i].type = qm_pre_store_caps[i];
                qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
                                                        qm_pre_store_caps[i], qm->cap_ver);
        }

        qm->cap_tables.qm_cap_table = qm_cap;

        return 0;
}

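qm_pre_store_irq_type_caps() is the heart of this series' capability rework: each register is read once at init via hisi_qm_get_hw_info() and cached in qm->cap_tables, so later users become a plain table lookup. Both lines below are taken from hunks elsewhere in this diff and shown side by side only to make the before/after pattern explicit:

/* before: every caller re-reads the capability through the hw-info helper */
val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);

/* after: the value was cached once at init; callers index the saved table */
val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;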
static int qm_get_hw_caps(struct hisi_qm *qm)
{
        const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
                                                  qm_cap_info_pf : qm_cap_info_vf;
@ -5046,6 +5037,9 @@ static void qm_get_hw_caps(struct hisi_qm *qm)
                if (val)
                        set_bit(cap_info[i].type, &qm->caps);
        }

        /* Fetch and save the value of irq type related capability registers */
        return qm_pre_store_irq_type_caps(qm);
}

static int qm_get_pci_res(struct hisi_qm *qm)
@ -5067,7 +5061,10 @@ static int qm_get_pci_res(struct hisi_qm *qm)
                goto err_request_mem_regions;
        }

        qm_get_hw_caps(qm);
        ret = qm_get_hw_caps(qm);
        if (ret)
                goto err_ioremap;

        if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
                qm->db_interval = QM_QP_DB_INTERVAL;
                qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
@ -5340,7 +5337,6 @@ int hisi_qm_init(struct hisi_qm *qm)
                goto err_free_qm_memory;

        qm_cmd_init(qm);
        atomic_set(&qm->status.flags, QM_INIT);

        return 0;
@ -72,10 +72,6 @@ struct qm_aeqc {
        __le32 dw6;
};

static const char * const qm_s[] = {
        "init", "start", "close", "stop",
};

int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op);
void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
void hisi_qm_set_algqos_init(struct hisi_qm *qm);
@ -220,6 +220,13 @@ enum sec_cap_type {
        SEC_CORE4_ALG_BITMAP_HIGH,
};

enum sec_cap_reg_record_idx {
        SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
        SEC_DRV_ALG_BITMAP_HIGH_IDX,
        SEC_DEV_ALG_BITMAP_LOW_IDX,
        SEC_DEV_ALG_BITMAP_HIGH_IDX,
};

void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
@ -850,6 +850,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
                break;
        default:
                dev_err(dev, "sec c_alg err!\n");
                return -EINVAL;
        }

@ -879,15 +880,11 @@ static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)

static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
@ -1176,7 +1173,8 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                return 0;
        }

        if (crypto_authenc_extractkeys(&keys, key, keylen))
        ret = crypto_authenc_extractkeys(&keys, key, keylen);
        if (ret)
                goto bad_key;

        ret = sec_aead_aes_set_key(c_ctx, &keys);
@ -1193,6 +1191,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,

        if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
            (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
                ret = -EINVAL;
                dev_err(dev, "MAC or AUTH key length error!\n");
                goto bad_key;
        }
@ -1201,7 +1200,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,

bad_key:
        memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
        return -EINVAL;
        return ret;
}

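The setkey fix above propagates the real error code from crypto_authenc_extractkeys() instead of a blanket -EINVAL. As a hedged sketch of how that helper is meant to be used (split_authenc_key is a hypothetical name; the API is the in-kernel one from crypto/authenc.h):

#include <crypto/authenc.h>
#include <linux/string.h>

static int split_authenc_key(const u8 *key, unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int ret;

        /* parses the rtattr-encoded key blob into auth and enc halves */
        ret = crypto_authenc_extractkeys(&keys, key, keylen);
        if (ret)
                return ret;     /* malformed blob: propagate, don't mask it */

        /* use keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen */

        memzero_explicit(&keys, sizeof(keys));
        return 0;
}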
@ -2032,8 +2031,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
                        ret = -EINVAL;
                }
                break;
        case SEC_CMODE_CFB:
        case SEC_CMODE_OFB:
        case SEC_CMODE_CTR:
                if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
                        dev_err(dev, "skcipher HW version error!\n");
@ -2197,16 +2194,6 @@ static struct sec_skcipher sec_skciphers[] = {
                .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
                                        SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
        },
        {
                .alg_msk = BIT(4),
                .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
                                        AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
        },
        {
                .alg_msk = BIT(5),
                .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
                                        AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
        },
        {
                .alg_msk = BIT(12),
                .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
@ -2222,16 +2209,6 @@ static struct sec_skcipher sec_skciphers[] = {
                .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
                                        SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
        },
        {
                .alg_msk = BIT(15),
                .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
                                        AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
        },
        {
                .alg_msk = BIT(16),
                .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
                                        AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
        },
        {
                .alg_msk = BIT(23),
                .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
@ -2547,9 +2524,12 @@ err:

int sec_register_to_crypto(struct hisi_qm *qm)
{
        u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
        u64 alg_mask;
        int ret = 0;

        alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
                                      SEC_DRV_ALG_BITMAP_LOW_IDX);

        mutex_lock(&sec_algs_lock);
        if (sec_available_devs) {
                sec_available_devs++;
@ -2578,7 +2558,10 @@ unlock:

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
        u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
        u64 alg_mask;

        alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
                                      SEC_DRV_ALG_BITMAP_LOW_IDX);

        mutex_lock(&sec_algs_lock);
        if (--sec_available_devs)
@ -37,8 +37,6 @@ enum sec_mac_len {
enum sec_cmode {
        SEC_CMODE_ECB = 0x0,
        SEC_CMODE_CBC = 0x1,
        SEC_CMODE_CFB = 0x2,
        SEC_CMODE_OFB = 0x3,
        SEC_CMODE_CTR = 0x4,
        SEC_CMODE_CCM = 0x5,
        SEC_CMODE_GCM = 0x6,
@ -120,7 +120,6 @@
                                         GENMASK_ULL(42, 25))
#define SEC_AEAD_BITMAP                 (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
                                         GENMASK_ULL(45, 43))
#define SEC_DEV_ALG_MAX_LEN             256

struct sec_hw_error {
        u32 int_msk;
@ -132,11 +131,6 @@ struct sec_dfx_item {
        u32 offset;
};

struct sec_dev_alg {
        u64 alg_msk;
        const char *algs;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

@ -159,7 +153,7 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
        {SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
        {SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
        {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
        {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
        {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
        {SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
        {SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
        {SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
@ -173,15 +167,22 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
        {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};

static const struct sec_dev_alg sec_dev_algs[] = { {
static const u32 sec_pre_store_caps[] = {
        SEC_DRV_ALG_BITMAP_LOW,
        SEC_DRV_ALG_BITMAP_HIGH,
        SEC_DEV_ALG_BITMAP_LOW,
        SEC_DEV_ALG_BITMAP_HIGH,
};

static const struct qm_dev_alg sec_dev_algs[] = { {
        .alg_msk = SEC_CIPHER_BITMAP,
        .algs = "cipher\n",
        .alg = "cipher\n",
}, {
        .alg_msk = SEC_DIGEST_BITMAP,
        .algs = "digest\n",
        .alg = "digest\n",
}, {
        .alg_msk = SEC_AEAD_BITMAP,
        .algs = "aead\n",
        .alg = "aead\n",
},
};

@ -394,8 +395,8 @@ u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
{
        u32 cap_val_h, cap_val_l;

        cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
        cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
        cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
        cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;

        return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
}
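sec_get_alg_bitmap() still packs two 32-bit capability words into one 64-bit mask. A worked, self-contained example in plain C using the QM_HW_V2 defaults from sec_basic_info above (assuming SEC_ALG_BITMAP_SHIFT is 32):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* QM_HW_V2 defaults of the DRV bitmaps from sec_basic_info above */
        uint32_t cap_val_h = 0x395C;
        uint32_t cap_val_l = 0x18050CB;

        /* SEC_ALG_BITMAP_SHIFT is assumed to be 32 here */
        uint64_t alg_mask = ((uint64_t)cap_val_h << 32) | cap_val_l;

        printf("alg_mask = 0x%llx\n", (unsigned long long)alg_mask);
        /* prints: alg_mask = 0x395c018050cb */
        return 0;
}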
@ -1077,37 +1078,31 @@ static int sec_pf_probe_init(struct sec_dev *sec)
        return ret;
}

static int sec_set_qm_algs(struct hisi_qm *qm)
static int sec_pre_store_cap_reg(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        char *algs, *ptr;
        u64 alg_mask;
        int i;
        struct hisi_qm_cap_record *sec_cap;
        struct pci_dev *pdev = qm->pdev;
        size_t i, size;

        if (!qm->use_sva)
                return 0;

        algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
        if (!algs)
        size = ARRAY_SIZE(sec_pre_store_caps);
        sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
        if (!sec_cap)
                return -ENOMEM;

        alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
        for (i = 0; i < size; i++) {
                sec_cap[i].type = sec_pre_store_caps[i];
                sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
                                                         sec_pre_store_caps[i], qm->cap_ver);
        }

        for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
                if (alg_mask & sec_dev_algs[i].alg_msk)
                        strcat(algs, sec_dev_algs[i].algs);

        ptr = strrchr(algs, '\n');
        if (ptr)
                *ptr = '\0';

        qm->uacce->algs = algs;
        qm->cap_tables.dev_cap_table = sec_cap;

        return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
        u64 alg_msk;
        int ret;

        qm->pdev = pdev;
@ -1142,7 +1137,16 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
                return ret;
        }

        ret = sec_set_qm_algs(qm);
        /* Fetch and save the value of capability registers */
        ret = sec_pre_store_cap_reg(qm);
        if (ret) {
                pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
                hisi_qm_uninit(qm);
                return ret;
        }

        alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
        ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
        if (ret) {
                pci_err(qm->pdev, "Failed to set sec algs!\n");
                hisi_qm_uninit(qm);
@ -121,10 +121,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
        return pool;

err_free_mem:
        for (j = 0; j < i; j++) {
        for (j = 0; j < i; j++)
                dma_free_coherent(dev, block_size, block[j].sgl,
                                  block[j].sgl_dma);
        }

        kfree_sensitive(pool);
        return ERR_PTR(-ENOMEM);
}
@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
        struct mem_block *block;
        int i;
        u32 i;

        if (!dev || !pool)
                return;
@ -196,9 +196,10 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
        struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
        u16 entry_sum = le16_to_cpu(hw_sgl->entry_sum_in_sgl);
        int i;

        for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
        for (i = 0; i < entry_sum; i++) {
                hw_sge[i].page_ctrl = NULL;
                hw_sge[i].buf = 0;
                hw_sge[i].len = 0;
@ -223,10 +224,11 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
                              u32 index, dma_addr_t *hw_sgl_dma)
{
        struct hisi_acc_hw_sgl *curr_hw_sgl;
        unsigned int i, sg_n_mapped;
        dma_addr_t curr_sgl_dma = 0;
        struct acc_hw_sge *curr_hw_sge;
        struct scatterlist *sg;
        int i, sg_n, sg_n_mapped;
        int sg_n;

        if (!dev || !sgl || !pool || !hw_sgl_dma)
                return ERR_PTR(-EINVAL);
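The last hunk narrows the mapping code's local types: sg_n counts entries in the caller's scatterlist, while sg_n_mapped is what dma_map_sg() actually produced, which can be smaller when the IOMMU coalesces entries. A hedged sketch of that distinction with the standard scatterlist API (walk_mapped_sgl is a hypothetical name):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int walk_mapped_sgl(struct device *dev, struct scatterlist *sgl)
{
        struct scatterlist *sg;
        int sg_n, sg_n_mapped, i;

        sg_n = sg_nents(sgl);
        sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
        if (!sg_n_mapped)
                return -EINVAL;

        /* only the mapped count is valid for walking DMA addresses */
        for_each_sg(sgl, sg, sg_n_mapped, i) {
                /* sg_dma_address(sg) / sg_dma_len(sg) are valid here */
        }

        dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
        return 0;
}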
Some files were not shown because too many files have changed in this diff.