Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:

 "API:
   - Removed CRYPTO_TFM_RES flags
   - Extended spawn grabbing to all algorithm types
   - Moved hash descsize verification into API code

  Algorithms:
   - Fixed recursive pcrypt dead-lock
   - Added new 32 and 64-bit generic versions of poly1305
   - Added cryptogams implementation of x86/poly1305

  Drivers:
   - Added support for i.MX8M Mini in caam
   - Added support for i.MX8M Nano in caam
   - Added support for i.MX8M Plus in caam
   - Added support for A33 variant of SS in sun4i-ss
   - Added TEE support for Raven Ridge in ccp
   - Added in-kernel API to submit TEE commands in ccp
   - Added AMD-TEE driver
   - Added support for BCM2711 in iproc-rng200
   - Added support for AES256-GCM based ciphers for chtls
   - Added aead support on SEC2 in hisilicon"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (244 commits)
  crypto: arm/chacha - fix build failured when kernel mode NEON is disabled
  crypto: caam - add support for i.MX8M Plus
  crypto: x86/poly1305 - emit does base conversion itself
  crypto: hisilicon - fix spelling mistake "disgest" -> "digest"
  crypto: chacha20poly1305 - add back missing test vectors and test chunking
  crypto: x86/poly1305 - fix .gitignore typo
  tee: fix memory allocation failure checks on drv_data and amdtee
  crypto: ccree - erase unneeded inline funcs
  crypto: ccree - make cc_pm_put_suspend() void
  crypto: ccree - split overloaded usage of irq field
  crypto: ccree - fix PM race condition
  crypto: ccree - fix FDE descriptor sequence
  crypto: ccree - cc_do_send_request() is void func
  crypto: ccree - fix pm wrongful error reporting
  crypto: ccree - turn errors to debug msgs
  crypto: ccree - fix AEAD decrypt auth fail
  crypto: ccree - fix typo in comment
  crypto: ccree - fix typos in error msgs
  crypto: atmel-{aes,sha,tdes} - Retire crypto_platform_data
  crypto: x86/sha - Eliminate casts on asm implementations
  ...

This commit is contained in: commit a78208e243
.mailmap

@@ -139,6 +139,7 @@ Juha Yrjola <at solidboot.com>
 Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
 Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
+Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
 Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -39,6 +39,7 @@ Core utilities
    ../RCU/index
    gcc-plugins
    symbol-namespaces
+   padata


 Interfaces for kernel debugging
Documentation/core-api/padata.rst (new file, 169 lines)

@@ -0,0 +1,169 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+The padata parallel execution mechanism
+=======================================
+
+:Date: December 2019
+
+Padata is a mechanism by which the kernel can farm jobs out to be done in
+parallel on multiple CPUs while retaining their ordering.  It was developed for
+use with the IPsec code, which needs to be able to perform encryption and
+decryption on large numbers of packets without reordering those packets.  The
+crypto developers made a point of writing padata in a sufficiently general
+fashion that it could be put to other uses as well.
+
+Usage
+=====
+
+Initializing
+------------
+
+The first step in using padata is to set up a padata_instance structure for
+overall control of how jobs are to be run::
+
+    #include <linux/padata.h>
+
+    struct padata_instance *padata_alloc_possible(const char *name);
+
+'name' simply identifies the instance.
+
+There are functions for enabling and disabling the instance::
+
+    int padata_start(struct padata_instance *pinst);
+    void padata_stop(struct padata_instance *pinst);
+
+These functions are setting or clearing the "PADATA_INIT" flag; if that flag is
+not set, other functions will refuse to work.  padata_start() returns zero on
+success (flag set) or -EINVAL if the padata cpumask contains no active CPU
+(flag not set).  padata_stop() clears the flag and blocks until the padata
+instance is unused.
+
+Finally, complete padata initialization by allocating a padata_shell::
+
+    struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
+
+A padata_shell is used to submit a job to padata and allows a series of such
+jobs to be serialized independently.  A padata_instance may have one or more
+padata_shells associated with it, each allowing a separate series of jobs.
+
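A minimal sketch of how a user of this API might wire the initialization calls
together (illustrative only; my_setup(), my_pinst and my_ps are hypothetical
names, not part of this patch)::

    #include <linux/errno.h>
    #include <linux/padata.h>

    static struct padata_instance *my_pinst;
    static struct padata_shell *my_ps;

    static int my_setup(void)
    {
            int err;

            my_pinst = padata_alloc_possible("my_instance");
            if (!my_pinst)
                    return -ENOMEM;

            err = padata_start(my_pinst);          /* sets PADATA_INIT */
            if (err)
                    goto free_inst;

            my_ps = padata_alloc_shell(my_pinst);  /* one serialization domain */
            if (!my_ps) {
                    err = -ENOMEM;
                    goto stop_inst;
            }
            return 0;

    stop_inst:
            padata_stop(my_pinst);
    free_inst:
            padata_free(my_pinst);
            return err;
    }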
+Modifying cpumasks
+------------------
+
+The CPUs used to run jobs can be changed in two ways, programmatically with
+padata_set_cpumask() or via sysfs.  The former is defined::
+
+    int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
+                           cpumask_var_t cpumask);
+
+Here cpumask_type is one of PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, where a
+parallel cpumask describes which processors will be used to execute jobs
+submitted to this instance in parallel and a serial cpumask defines which
+processors are allowed to be used as the serialization callback processor.
+cpumask specifies the new cpumask to use.
+
+There may be sysfs files for an instance's cpumasks.  For example, pcrypt's
+live in /sys/kernel/pcrypt/<instance-name>.  Within an instance's directory
+there are two files, parallel_cpumask and serial_cpumask, and either cpumask
+may be changed by echoing a bitmask into the file, for example::
+
+    echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
+
+Reading one of these files shows the user-supplied cpumask, which may be
+different from the 'usable' cpumask.
+
+Padata maintains two pairs of cpumasks internally, the user-supplied cpumasks
+and the 'usable' cpumasks.  (Each pair consists of a parallel and a serial
+cpumask.)  The user-supplied cpumasks default to all possible CPUs on instance
+allocation and may be changed as above.  The usable cpumasks are always a
+subset of the user-supplied cpumasks and contain only the online CPUs in the
+user-supplied masks; these are the cpumasks padata actually uses.  So it is
+legal to supply a cpumask to padata that contains offline CPUs.  Once an
+offline CPU in the user-supplied cpumask comes online, padata is going to use
+it.
+
+Changing the CPU masks is an expensive operation, so it should not be done
+with great frequency.
+
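A short sketch of the programmatic path (illustrative only; the function name
and the choice of CPUs are hypothetical)::

    #include <linux/cpumask.h>
    #include <linux/padata.h>

    static int my_use_two_cpus(struct padata_instance *pinst)
    {
            cpumask_var_t mask;
            int err;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_clear(mask);
            cpumask_set_cpu(0, mask);
            cpumask_set_cpu(1, mask);

            /* restrict parallel execution to CPUs 0 and 1 */
            err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

            free_cpumask_var(mask);
            return err;
    }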
+Running A Job
+-------------
+
+Actually submitting work to the padata instance requires the creation of a
+padata_priv structure, which represents one job::
+
+    struct padata_priv {
+        /* Other stuff here... */
+        void                    (*parallel)(struct padata_priv *padata);
+        void                    (*serial)(struct padata_priv *padata);
+    };
+
+This structure will almost certainly be embedded within some larger
+structure specific to the work to be done.  Most of its fields are private to
+padata, but the structure should be zeroed at initialisation time, and the
+parallel() and serial() functions should be provided.  Those functions will
+be called in the process of getting the work done as we will see
+momentarily.
+
+The submission of the job is done with::
+
+    int padata_do_parallel(struct padata_shell *ps,
+                           struct padata_priv *padata, int *cb_cpu);
+
+The ps and padata structures must be set up as described above; cb_cpu
+points to the preferred CPU to be used for the final callback when the job is
+done; it must be in the current instance's CPU mask (if not the cb_cpu pointer
+is updated to point to the CPU actually chosen).  The return value from
+padata_do_parallel() is zero on success, indicating that the job is in
+progress.  -EBUSY means that somebody, somewhere else is messing with the
+instance's CPU mask, while -EINVAL is a complaint about cb_cpu not being in the
+serial cpumask, no online CPUs in the parallel or serial cpumasks, or a stopped
+instance.
+
+Each job submitted to padata_do_parallel() will, in turn, be passed to
+exactly one call to the above-mentioned parallel() function, on one CPU, so
+true parallelism is achieved by submitting multiple jobs.  parallel() runs with
+software interrupts disabled and thus cannot sleep.  The parallel()
+function gets the padata_priv structure pointer as its lone parameter;
+information about the actual work to be done is probably obtained by using
+container_of() to find the enclosing structure.
+
+Note that parallel() has no return value; the padata subsystem assumes that
+parallel() will take responsibility for the job from this point.  The job
+need not be completed during this call, but, if parallel() leaves work
+outstanding, it should be prepared to be called again with a new job before
+the previous one completes.
+
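A sketch of a job structure and its submission (illustrative only; struct
my_job, my_submit() and the callback names are hypothetical, and the job is
assumed to have been zero-allocated, e.g. with kzalloc())::

    struct my_job {
            struct padata_priv padata;   /* embedded; zeroed by kzalloc() */
            /* job-specific data goes here ... */
    };

    static int my_submit(struct padata_shell *ps, struct my_job *job)
    {
            int cb_cpu = 0;   /* preferred callback CPU; padata may rewrite it */

            job->padata.parallel = my_parallel;
            job->padata.serial   = my_serial;

            /* 0 on success; -EBUSY or -EINVAL as described above */
            return padata_do_parallel(ps, &job->padata, &cb_cpu);
    }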
+Serializing Jobs
+----------------
+
+When a job does complete, parallel() (or whatever function actually finishes
+the work) should inform padata of the fact with a call to::
+
+    void padata_do_serial(struct padata_priv *padata);
+
+At some point in the future, padata_do_serial() will trigger a call to the
+serial() function in the padata_priv structure.  That call will happen on
+the CPU requested in the initial call to padata_do_parallel(); it, too, is
+run with local software interrupts disabled.
+Note that this call may be deferred for a while since the padata code takes
+pains to ensure that jobs are completed in the order in which they were
+submitted.
+
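The two callbacks from the sketch above might look like this (illustrative
only; my_parallel()/my_serial() and the do_the_work()/complete_the_job()
helpers are hypothetical)::

    static void my_parallel(struct padata_priv *padata)
    {
            struct my_job *job = container_of(padata, struct my_job, padata);

            /* softirqs are disabled here, so no sleeping */
            do_the_work(job);

            /* hand the finished job back for in-order completion */
            padata_do_serial(padata);
    }

    static void my_serial(struct padata_priv *padata)
    {
            struct my_job *job = container_of(padata, struct my_job, padata);

            /* runs on the requested callback CPU, in submission order */
            complete_the_job(job);
    }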
+Destroying
+----------
+
+Cleaning up a padata instance predictably involves calling the three free
+functions that correspond to the allocation in reverse::
+
+    void padata_free_shell(struct padata_shell *ps);
+    void padata_stop(struct padata_instance *pinst);
+    void padata_free(struct padata_instance *pinst);
+
+It is the user's responsibility to ensure all outstanding jobs are complete
+before any of the above are called.
+
+Interface
+=========
+
+.. kernel-doc:: include/linux/padata.h
+.. kernel-doc:: kernel/padata.c
@@ -31,33 +31,23 @@ The counterparts to those functions are listed below.

 ::

-    int crypto_unregister_alg(struct crypto_alg *alg);
-    int crypto_unregister_algs(struct crypto_alg *algs, int count);
+    void crypto_unregister_alg(struct crypto_alg *alg);
+    void crypto_unregister_algs(struct crypto_alg *algs, int count);


-Notice that both registration and unregistration functions do return a
-value, so make sure to handle errors. A return code of zero implies
-success. Any return code < 0 implies an error.
-
-The bulk registration/unregistration functions register/unregister each
-transformation in the given array of length count. They handle errors as
-follows:
-
--  crypto_register_algs() succeeds if and only if it successfully
-   registers all the given transformations. If an error occurs partway
-   through, then it rolls back successful registrations before returning
-   the error code. Note that if a driver needs to handle registration
-   errors for individual transformations, then it will need to use the
-   non-bulk function crypto_register_alg() instead.
-
--  crypto_unregister_algs() tries to unregister all the given
-   transformations, continuing on error. It logs errors and always
-   returns zero.
+The registration functions return 0 on success, or a negative errno
+value on failure. crypto_register_algs() succeeds only if it
+successfully registered all the given algorithms; if it fails partway
+through, then any changes are rolled back.
+
+The unregistration functions always succeed, so they don't have a
+return value. Don't try to unregister algorithms that aren't
+currently registered.

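A small sketch of how a driver typically uses the bulk helpers after this
change (illustrative only; my_algs[] and the module init/exit names are
hypothetical)::

    static struct crypto_alg my_algs[2] = { /* filled in elsewhere */ };

    static int __init my_module_init(void)
    {
            /* 0 on success; on partial failure everything is rolled back */
            return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
    }

    static void __exit my_module_exit(void)
    {
            /* void: unregistration cannot fail */
            crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
    }

    module_init(my_module_init);
    module_exit(my_module_exit);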
 Single-Block Symmetric Ciphers [CIPHER]
 ---------------------------------------

-Example of transformations: aes, arc4, ...
+Example of transformations: aes, serpent, ...

 This section describes the simplest of all transformation
 implementations, that being the CIPHER type used for symmetric ciphers.

@@ -108,7 +98,7 @@ is also valid:
 Multi-Block Ciphers
 -------------------

-Example of transformations: cbc(aes), ecb(arc4), ...
+Example of transformations: cbc(aes), chacha20, ...

 This section describes the multi-block cipher transformation
 implementations. The multi-block ciphers are used for transformations

@@ -169,10 +159,10 @@ are as follows:

 ::

-    int crypto_unregister_ahash(struct ahash_alg *alg);
+    void crypto_unregister_ahash(struct ahash_alg *alg);

-    int crypto_unregister_shash(struct shash_alg *alg);
-    int crypto_unregister_shashes(struct shash_alg *algs, int count);
+    void crypto_unregister_shash(struct shash_alg *alg);
+    void crypto_unregister_shashes(struct shash_alg *algs, int count);


 Cipher Definition With struct shash_alg and ahash_alg
@@ -2,6 +2,7 @@ HWRNG support for the iproc-rng200 driver

 Required properties:
 - compatible : Must be one of:
+	"brcm,bcm2711-rng200"
 	"brcm,bcm7211-rng200"
 	"brcm,bcm7278-rng200"
 	"brcm,iproc-rng200"
@@ -1,163 +0,0 @@
-=======================================
-The padata parallel execution mechanism
-=======================================
-
-:Last updated: for 2.6.36
-
-Padata is a mechanism by which the kernel can farm work out to be done in
-parallel on multiple CPUs while retaining the ordering of tasks.  It was
-developed for use with the IPsec code, which needs to be able to perform
-encryption and decryption on large numbers of packets without reordering
-those packets.  The crypto developers made a point of writing padata in a
-sufficiently general fashion that it could be put to other uses as well.
-
-The first step in using padata is to set up a padata_instance structure for
-overall control of how tasks are to be run::
-
-    #include <linux/padata.h>
-
-    struct padata_instance *padata_alloc(const char *name,
-					 const struct cpumask *pcpumask,
-					 const struct cpumask *cbcpumask);
-
-'name' simply identifies the instance.
-
-The pcpumask describes which processors will be used to execute work
-submitted to this instance in parallel. The cbcpumask defines which
-processors are allowed to be used as the serialization callback processor.
-The workqueue wq is where the work will actually be done; it should be
-a multithreaded queue, naturally.
-
-To allocate a padata instance with the cpu_possible_mask for both
-cpumasks this helper function can be used::
-
-    struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq);
-
-Note: Padata maintains two kinds of cpumasks internally. The user supplied
-cpumasks, submitted by padata_alloc/padata_alloc_possible and the 'usable'
-cpumasks. The usable cpumasks are always a subset of active CPUs in the
-user supplied cpumasks; these are the cpumasks padata actually uses. So
-it is legal to supply a cpumask to padata that contains offline CPUs.
-Once an offline CPU in the user supplied cpumask comes online, padata
-is going to use it.
-
-There are functions for enabling and disabling the instance::
-
-    int padata_start(struct padata_instance *pinst);
-    void padata_stop(struct padata_instance *pinst);
-
-These functions are setting or clearing the "PADATA_INIT" flag;
-if that flag is not set, other functions will refuse to work.
-padata_start returns zero on success (flag set) or -EINVAL if the
-padata cpumask contains no active CPU (flag not set).
-padata_stop clears the flag and blocks until the padata instance
-is unused.
-
-The list of CPUs to be used can be adjusted with these functions::
-
-    int padata_set_cpumasks(struct padata_instance *pinst,
-			    cpumask_var_t pcpumask,
-			    cpumask_var_t cbcpumask);
-    int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
-			   cpumask_var_t cpumask);
-    int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
-    int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
-
-Changing the CPU masks are expensive operations, though, so it should not be
-done with great frequency.
-
-It's possible to change both cpumasks of a padata instance with
-padata_set_cpumasks by specifying the cpumasks for parallel execution (pcpumask)
-and for the serial callback function (cbcpumask). padata_set_cpumask is used to
-change just one of the cpumasks. Here cpumask_type is one of PADATA_CPU_SERIAL,
-PADATA_CPU_PARALLEL and cpumask specifies the new cpumask to use.
-To simply add or remove one CPU from a certain cpumask the functions
-padata_add_cpu/padata_remove_cpu are used. cpu specifies the CPU to add or
-remove and mask is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL.
-
-If a user is interested in padata cpumask changes, he can register to
-the padata cpumask change notifier::
-
-    int padata_register_cpumask_notifier(struct padata_instance *pinst,
-					 struct notifier_block *nblock);
-
-To unregister from that notifier::
-
-    int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
-					   struct notifier_block *nblock);
-
-The padata cpumask change notifier notifies about changes of the usable
-cpumasks, i.e. the subset of active CPUs in the user supplied cpumask.
-
-Padata calls the notifier chain with::
-
-    blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
-				 notification_mask,
-				 &pd_new->cpumask);
-
-Here cpumask_change_notifier is registered notifier, notification_mask
-is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL and cpumask is a pointer
-to a struct padata_cpumask that contains the new cpumask information.
-
-Actually submitting work to the padata instance requires the creation of a
-padata_priv structure::
-
-    struct padata_priv {
-        /* Other stuff here... */
-	void                    (*parallel)(struct padata_priv *padata);
-	void                    (*serial)(struct padata_priv *padata);
-    };
-
-This structure will almost certainly be embedded within some larger
-structure specific to the work to be done.  Most of its fields are private to
-padata, but the structure should be zeroed at initialisation time, and the
-parallel() and serial() functions should be provided.  Those functions will
-be called in the process of getting the work done as we will see
-momentarily.
-
-The submission of work is done with::
-
-    int padata_do_parallel(struct padata_instance *pinst,
-			   struct padata_priv *padata, int cb_cpu);
-
-The pinst and padata structures must be set up as described above; cb_cpu
-specifies which CPU will be used for the final callback when the work is
-done; it must be in the current instance's CPU mask.  The return value from
-padata_do_parallel() is zero on success, indicating that the work is in
-progress. -EBUSY means that somebody, somewhere else is messing with the
-instance's CPU mask, while -EINVAL is a complaint about cb_cpu not being
-in that CPU mask or about a not running instance.
-
-Each task submitted to padata_do_parallel() will, in turn, be passed to
-exactly one call to the above-mentioned parallel() function, on one CPU, so
-true parallelism is achieved by submitting multiple tasks.  parallel() runs with
-software interrupts disabled and thus cannot sleep.  The parallel()
-function gets the padata_priv structure pointer as its lone parameter;
-information about the actual work to be done is probably obtained by using
-container_of() to find the enclosing structure.
-
-Note that parallel() has no return value; the padata subsystem assumes that
-parallel() will take responsibility for the task from this point.  The work
-need not be completed during this call, but, if parallel() leaves work
-outstanding, it should be prepared to be called again with a new job before
-the previous one completes.  When a task does complete, parallel() (or
-whatever function actually finishes the job) should inform padata of the
-fact with a call to::
-
-    void padata_do_serial(struct padata_priv *padata);
-
-At some point in the future, padata_do_serial() will trigger a call to the
-serial() function in the padata_priv structure.  That call will happen on
-the CPU requested in the initial call to padata_do_parallel(); it, too, is
-run with local software interrupts disabled.
-Note that this call may be deferred for a while since the padata code takes
-pains to ensure that tasks are completed in the order in which they were
-submitted.
-
-The one remaining function in the padata API should be called to clean up
-when a padata instance is no longer needed::
-
-    void padata_free(struct padata_instance *pinst);
-
-This function will busy-wait while any remaining tasks are completed, so it
-might be best not to call it while there is work outstanding.
@@ -112,6 +112,83 @@ kernel are handled by the kernel driver. Other RPC messages will be forwarded to
 tee-supplicant without further involvement of the driver, except switching
 shared memory buffer representation.

+AMD-TEE driver
+==============
+
+The AMD-TEE driver handles the communication with AMD's TEE environment. The
+TEE environment is provided by AMD Secure Processor.
+
+The AMD Secure Processor (formerly called Platform Security Processor or PSP)
+is a dedicated processor that features ARM TrustZone technology, along with a
+software-based Trusted Execution Environment (TEE) designed to enable
+third-party Trusted Applications. This feature is currently enabled only for
+APUs.
+
+The following picture shows a high level overview of AMD-TEE::
+
+    x86                                        |
+                                               |
+   User space            (Kernel space)       |   AMD Secure Processor (PSP)
+   ~~~~~~~~~~            ~~~~~~~~~~~~~~       |   ~~~~~~~~~~~~~~~~~~~~~~~~~~
+                                               |
+   +--------+                                  | +-------------+
+   | Client |                                  | | Trusted     |
+   +--------+                                  | | Application |
+       /\                                      | +-------------+
+       ||                                      |       /\
+       ||                                      |       ||
+       ||                                      |       \/
+       ||                                      |  +----------+
+       ||                                      |  |   TEE    |
+       ||                                      |  | Internal |
+       \/                                      |  |   API    |
+   +---------+           +-----------+---------+  +----------+
+   | TEE     |           | TEE       | AMD-TEE |  | AMD-TEE  |
+   | Client  |           | subsystem | driver  |  | Trusted  |
+   | API     |           |           |         |  |   OS     |
+   +---------+-----------+----+------+---------+--+----------+
+   |   Generic TEE API        |      | ASP     | Mailbox              |
+   |   IOCTL (TEE_IOC_*)      |      | driver  | Register Protocol    |
+   +--------------------------+      +---------+----------------------+
+
+At the lowest level (in x86), the AMD Secure Processor (ASP) driver uses the
+CPU to PSP mailbox register to submit commands to the PSP. The format of the
+command buffer is opaque to the ASP driver. Its role is to submit commands to
+the secure processor and return results to the AMD-TEE driver. The interface
+between the AMD-TEE driver and the AMD Secure Processor driver can be found
+in [6].
+
+The AMD-TEE driver packages the command buffer payload for processing in TEE.
+The command buffer format for the different TEE commands can be found in [7].
+
+The TEE commands supported by AMD-TEE Trusted OS are:
+* TEE_CMD_ID_LOAD_TA          - loads a Trusted Application (TA) binary into
+                                TEE environment.
+* TEE_CMD_ID_UNLOAD_TA        - unloads TA binary from TEE environment.
+* TEE_CMD_ID_OPEN_SESSION     - opens a session with a loaded TA.
+* TEE_CMD_ID_CLOSE_SESSION    - closes a session with a loaded TA.
+* TEE_CMD_ID_INVOKE_CMD       - invokes a command with a loaded TA.
+* TEE_CMD_ID_MAP_SHARED_MEM   - maps shared memory.
+* TEE_CMD_ID_UNMAP_SHARED_MEM - unmaps shared memory.
+
+AMD-TEE Trusted OS is the firmware running on AMD Secure Processor.
+
+The AMD-TEE driver registers itself with the TEE subsystem and implements the
+following driver function callbacks:
+
+* get_version - returns the driver implementation id and capability.
+* open - sets up the driver context data structure.
+* release - frees up driver resources.
+* open_session - loads the TA binary and opens a session with the loaded TA.
+* close_session - closes the session with the loaded TA and unloads it.
+* invoke_func - invokes a command with the loaded TA.
+
+The cancel_req driver callback is not supported by AMD-TEE.
+
+The GlobalPlatform TEE Client API [5] can be used by the user space (client) to
+talk to AMD's TEE. AMD's TEE provides a secure environment for loading a TA,
+opening a session, invoking commands and closing the session with the TA.
+
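A minimal user-space sketch of that flow using the GlobalPlatform TEE Client
API (function and type names come from the TEE Client API Specification [5];
the TA UUID, command ID and parameter layout here are hypothetical)::

    #include <tee_client_api.h>

    int talk_to_ta(void)
    {
            TEEC_Context ctx;
            TEEC_Session sess;
            TEEC_Operation op = { 0 };
            TEEC_UUID uuid = { /* UUID of the target TA -- placeholder */ };
            uint32_t origin;
            TEEC_Result res;

            res = TEEC_InitializeContext(NULL, &ctx);     /* open the TEE device */
            if (res != TEEC_SUCCESS)
                    return -1;

            res = TEEC_OpenSession(&ctx, &sess, &uuid,
                                   TEEC_LOGIN_PUBLIC, NULL, NULL, &origin);
            if (res != TEEC_SUCCESS)
                    goto out_ctx;

            op.paramTypes = TEEC_PARAM_TYPES(TEEC_NONE, TEEC_NONE,
                                             TEEC_NONE, TEEC_NONE);
            res = TEEC_InvokeCommand(&sess, 0 /* TA-defined command */, &op, &origin);

            TEEC_CloseSession(&sess);
    out_ctx:
            TEEC_FinalizeContext(&ctx);
            return res == TEEC_SUCCESS ? 0 : -1;
    }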
 References
 ==========

@@ -125,3 +202,7 @@ References

 [5] http://www.globalplatform.org/specificationsdevice.asp look for
     "TEE Client API Specification v1.0" and click download.
+
+[6] include/linux/psp-tee.h
+
+[7] drivers/tee/amdtee/amdtee_if.h
@@ -791,7 +791,6 @@ F:	include/uapi/rdma/efa-abi.h

 AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
 M:	Tom Lendacky <thomas.lendacky@amd.com>
-M:	Gary Hook <gary.hook@amd.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
 F:	drivers/crypto/ccp/

@@ -12478,7 +12477,7 @@ L:	linux-crypto@vger.kernel.org
 S:	Maintained
 F:	kernel/padata.c
 F:	include/linux/padata.h
-F:	Documentation/padata.txt
+F:	Documentation/core-api/padata.rst

 PAGE POOL
 M:	Jesper Dangaard Brouer <hawk@kernel.org>

@@ -14568,7 +14567,7 @@ F:	drivers/media/i2c/s5k5baf.c
 SAMSUNG S5P Security SubSystem (SSS) DRIVER
 M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Vladimir Zapolskiy <vz@mleia.com>
-M:	Kamil Konieczny <k.konieczny@partner.samsung.com>
+M:	Kamil Konieczny <k.konieczny@samsung.com>
 L:	linux-crypto@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
 S:	Maintained
@@ -138,14 +138,8 @@ static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 			 unsigned int key_len)
 {
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	int ret;
-
-	ret = ce_aes_expandkey(ctx, in_key, key_len);
-	if (!ret)
-		return 0;
-
-	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	return -EINVAL;
+	return ce_aes_expandkey(ctx, in_key, key_len);
 }

 struct crypto_aes_xts_ctx {

@@ -167,11 +161,7 @@ static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	if (!ret)
 		ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
 				       key_len / 2);
-	if (!ret)
-		return 0;
-
-	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	return -EINVAL;
+	return ret;
 }

 static int ecb_encrypt(struct skcipher_request *req)
@@ -115,7 +115,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
 		if (nbytes < walk.total)
 			nbytes = round_down(nbytes, walk.stride);

-		if (!neon) {
+		if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
 			chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
 				     nbytes, state, ctx->nrounds);
 			state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);

@@ -159,7 +159,7 @@ static int do_xchacha(struct skcipher_request *req, bool neon)

 	chacha_init_generic(state, ctx->key, req->iv);

-	if (!neon) {
+	if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
 		hchacha_block_arm(state, subctx.key, ctx->nrounds);
 	} else {
 		kernel_neon_begin();
@@ -54,10 +54,8 @@ static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
 {
 	u32 *mctx = crypto_shash_ctx(hash);

-	if (keylen != sizeof(u32)) {
-		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != sizeof(u32))
 		return -EINVAL;
-	}
 	*mctx = le32_to_cpup((__le32 *)key);
 	return 0;
 }
@@ -163,10 +163,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
 	struct ghash_key *key = crypto_shash_ctx(tfm);
 	be128 h;

-	if (keylen != GHASH_BLOCK_SIZE) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != GHASH_BLOCK_SIZE)
 		return -EINVAL;
-	}

 	/* needed for the fallback */
 	memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);

@@ -296,16 +294,11 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 {
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct crypto_ahash *child = &ctx->cryptd_tfm->base;
-	int err;

 	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 	crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
 			       & CRYPTO_TFM_REQ_MASK);
-	err = crypto_ahash_setkey(child, key, keylen);
-	crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
-			       & CRYPTO_TFM_RES_MASK);
-
-	return err;
+	return crypto_ahash_setkey(child, key, keylen);
 }

 static int ghash_async_init_tfm(struct crypto_tfm *tfm)

@@ -20,7 +20,7 @@

 void poly1305_init_arm(void *state, const u8 *key);
 void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit);
-void poly1305_emit_arm(void *state, __le32 *digest, const u32 *nonce);
+void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce);

 void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
 {

@@ -179,9 +179,6 @@ EXPORT_SYMBOL(poly1305_update_arch);

 void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
 {
-	__le32 digest[4];
-	u64 f = 0;
-
 	if (unlikely(dctx->buflen)) {
 		dctx->buf[dctx->buflen++] = 1;
 		memset(dctx->buf + dctx->buflen, 0,

@@ -189,18 +186,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
 		poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
 	}

-	poly1305_emit_arm(&dctx->h, digest, dctx->s);
+	poly1305_emit_arm(&dctx->h, dst, dctx->s);

-	/* mac = (h + s) % (2^128) */
-	f = (f >> 32) + le32_to_cpu(digest[0]);
-	put_unaligned_le32(f, dst);
-	f = (f >> 32) + le32_to_cpu(digest[1]);
-	put_unaligned_le32(f, dst + 4);
-	f = (f >> 32) + le32_to_cpu(digest[2]);
-	put_unaligned_le32(f, dst + 8);
-	f = (f >> 32) + le32_to_cpu(digest[3]);
-	put_unaligned_le32(f, dst + 12);
-
 	*dctx = (struct poly1305_desc_ctx){};
 }
 EXPORT_SYMBOL(poly1305_final_arch);
@ -15,7 +15,7 @@
|
|||||||
* void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
|
* void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
|
||||||
* u32 *macp, u8 const rk[], u32 rounds);
|
* u32 *macp, u8 const rk[], u32 rounds);
|
||||||
*/
|
*/
|
||||||
ENTRY(ce_aes_ccm_auth_data)
|
SYM_FUNC_START(ce_aes_ccm_auth_data)
|
||||||
ldr w8, [x3] /* leftover from prev round? */
|
ldr w8, [x3] /* leftover from prev round? */
|
||||||
ld1 {v0.16b}, [x0] /* load mac */
|
ld1 {v0.16b}, [x0] /* load mac */
|
||||||
cbz w8, 1f
|
cbz w8, 1f
|
||||||
@ -81,13 +81,13 @@ ENTRY(ce_aes_ccm_auth_data)
|
|||||||
st1 {v0.16b}, [x0]
|
st1 {v0.16b}, [x0]
|
||||||
10: str w8, [x3]
|
10: str w8, [x3]
|
||||||
ret
|
ret
|
||||||
ENDPROC(ce_aes_ccm_auth_data)
|
SYM_FUNC_END(ce_aes_ccm_auth_data)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
|
* void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
|
||||||
* u32 rounds);
|
* u32 rounds);
|
||||||
*/
|
*/
|
||||||
ENTRY(ce_aes_ccm_final)
|
SYM_FUNC_START(ce_aes_ccm_final)
|
||||||
ld1 {v3.4s}, [x2], #16 /* load first round key */
|
ld1 {v3.4s}, [x2], #16 /* load first round key */
|
||||||
ld1 {v0.16b}, [x0] /* load mac */
|
ld1 {v0.16b}, [x0] /* load mac */
|
||||||
cmp w3, #12 /* which key size? */
|
cmp w3, #12 /* which key size? */
|
||||||
@ -121,7 +121,7 @@ ENTRY(ce_aes_ccm_final)
|
|||||||
eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
|
eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
|
||||||
st1 {v0.16b}, [x0] /* store result */
|
st1 {v0.16b}, [x0] /* store result */
|
||||||
ret
|
ret
|
||||||
ENDPROC(ce_aes_ccm_final)
|
SYM_FUNC_END(ce_aes_ccm_final)
|
||||||
|
|
||||||
.macro aes_ccm_do_crypt,enc
|
.macro aes_ccm_do_crypt,enc
|
||||||
ldr x8, [x6, #8] /* load lower ctr */
|
ldr x8, [x6, #8] /* load lower ctr */
|
||||||
@ -212,10 +212,10 @@ CPU_LE( rev x8, x8 )
|
|||||||
* u8 const rk[], u32 rounds, u8 mac[],
|
* u8 const rk[], u32 rounds, u8 mac[],
|
||||||
* u8 ctr[]);
|
* u8 ctr[]);
|
||||||
*/
|
*/
|
||||||
ENTRY(ce_aes_ccm_encrypt)
|
SYM_FUNC_START(ce_aes_ccm_encrypt)
|
||||||
aes_ccm_do_crypt 1
|
aes_ccm_do_crypt 1
|
||||||
ENDPROC(ce_aes_ccm_encrypt)
|
SYM_FUNC_END(ce_aes_ccm_encrypt)
|
||||||
|
|
||||||
ENTRY(ce_aes_ccm_decrypt)
|
SYM_FUNC_START(ce_aes_ccm_decrypt)
|
||||||
aes_ccm_do_crypt 0
|
aes_ccm_do_crypt 0
|
||||||
ENDPROC(ce_aes_ccm_decrypt)
|
SYM_FUNC_END(ce_aes_ccm_decrypt)
|
||||||
|
@ -47,14 +47,8 @@ static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
|
|||||||
unsigned int key_len)
|
unsigned int key_len)
|
||||||
{
|
{
|
||||||
struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
|
struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = ce_aes_expandkey(ctx, in_key, key_len);
|
return ce_aes_expandkey(ctx, in_key, key_len);
|
||||||
if (!ret)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
|
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
|
||||||
|
@ -8,7 +8,7 @@
|
|||||||
|
|
||||||
.arch armv8-a+crypto
|
.arch armv8-a+crypto
|
||||||
|
|
||||||
ENTRY(__aes_ce_encrypt)
|
SYM_FUNC_START(__aes_ce_encrypt)
|
||||||
sub w3, w3, #2
|
sub w3, w3, #2
|
||||||
ld1 {v0.16b}, [x2]
|
ld1 {v0.16b}, [x2]
|
||||||
ld1 {v1.4s}, [x0], #16
|
ld1 {v1.4s}, [x0], #16
|
||||||
@ -34,9 +34,9 @@ ENTRY(__aes_ce_encrypt)
|
|||||||
eor v0.16b, v0.16b, v3.16b
|
eor v0.16b, v0.16b, v3.16b
|
||||||
st1 {v0.16b}, [x1]
|
st1 {v0.16b}, [x1]
|
||||||
ret
|
ret
|
||||||
ENDPROC(__aes_ce_encrypt)
|
SYM_FUNC_END(__aes_ce_encrypt)
|
||||||
|
|
||||||
ENTRY(__aes_ce_decrypt)
|
SYM_FUNC_START(__aes_ce_decrypt)
|
||||||
sub w3, w3, #2
|
sub w3, w3, #2
|
||||||
ld1 {v0.16b}, [x2]
|
ld1 {v0.16b}, [x2]
|
||||||
ld1 {v1.4s}, [x0], #16
|
ld1 {v1.4s}, [x0], #16
|
||||||
@ -62,23 +62,23 @@ ENTRY(__aes_ce_decrypt)
|
|||||||
eor v0.16b, v0.16b, v3.16b
|
eor v0.16b, v0.16b, v3.16b
|
||||||
st1 {v0.16b}, [x1]
|
st1 {v0.16b}, [x1]
|
||||||
ret
|
ret
|
||||||
ENDPROC(__aes_ce_decrypt)
|
SYM_FUNC_END(__aes_ce_decrypt)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* __aes_ce_sub() - use the aese instruction to perform the AES sbox
|
* __aes_ce_sub() - use the aese instruction to perform the AES sbox
|
||||||
* substitution on each byte in 'input'
|
* substitution on each byte in 'input'
|
||||||
*/
|
*/
|
||||||
ENTRY(__aes_ce_sub)
|
SYM_FUNC_START(__aes_ce_sub)
|
||||||
dup v1.4s, w0
|
dup v1.4s, w0
|
||||||
movi v0.16b, #0
|
movi v0.16b, #0
|
||||||
aese v0.16b, v1.16b
|
aese v0.16b, v1.16b
|
||||||
umov w0, v0.s[0]
|
umov w0, v0.s[0]
|
||||||
ret
|
ret
|
||||||
ENDPROC(__aes_ce_sub)
|
SYM_FUNC_END(__aes_ce_sub)
|
||||||
|
|
||||||
ENTRY(__aes_ce_invert)
|
SYM_FUNC_START(__aes_ce_invert)
|
||||||
ld1 {v0.4s}, [x1]
|
ld1 {v0.4s}, [x1]
|
||||||
aesimc v1.16b, v0.16b
|
aesimc v1.16b, v0.16b
|
||||||
st1 {v1.4s}, [x0]
|
st1 {v1.4s}, [x0]
|
||||||
ret
|
ret
|
||||||
ENDPROC(__aes_ce_invert)
|
SYM_FUNC_END(__aes_ce_invert)
|
||||||
|
@ -143,14 +143,8 @@ int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
|
|||||||
unsigned int key_len)
|
unsigned int key_len)
|
||||||
{
|
{
|
||||||
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = ce_aes_expandkey(ctx, in_key, key_len);
|
return ce_aes_expandkey(ctx, in_key, key_len);
|
||||||
if (!ret)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(ce_aes_setkey);
|
EXPORT_SYMBOL(ce_aes_setkey);
|
||||||
|
|
||||||
|
@ -9,8 +9,8 @@
|
|||||||
#include <linux/linkage.h>
|
#include <linux/linkage.h>
|
||||||
#include <asm/assembler.h>
|
#include <asm/assembler.h>
|
||||||
|
|
||||||
#define AES_ENTRY(func) ENTRY(ce_ ## func)
|
#define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func)
|
||||||
#define AES_ENDPROC(func) ENDPROC(ce_ ## func)
|
#define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func)
|
||||||
|
|
||||||
.arch armv8-a+crypto
|
.arch armv8-a+crypto
|
||||||
|
|
||||||
|
@ -122,11 +122,11 @@ CPU_BE( rev w7, w7 )
|
|||||||
ret
|
ret
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
ENTRY(__aes_arm64_encrypt)
|
SYM_FUNC_START(__aes_arm64_encrypt)
|
||||||
do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
|
do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
|
||||||
ENDPROC(__aes_arm64_encrypt)
|
SYM_FUNC_END(__aes_arm64_encrypt)
|
||||||
|
|
||||||
.align 5
|
.align 5
|
||||||
ENTRY(__aes_arm64_decrypt)
|
SYM_FUNC_START(__aes_arm64_decrypt)
|
||||||
do_crypt iround, crypto_it_tab, crypto_aes_inv_sbox, 0
|
do_crypt iround, crypto_it_tab, crypto_aes_inv_sbox, 0
|
||||||
ENDPROC(__aes_arm64_decrypt)
|
SYM_FUNC_END(__aes_arm64_decrypt)
|
||||||
|
@ -132,13 +132,8 @@ static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
|
|||||||
unsigned int key_len)
|
unsigned int key_len)
|
||||||
{
|
{
|
||||||
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = aes_expandkey(ctx, in_key, key_len);
|
return aes_expandkey(ctx, in_key, key_len);
|
||||||
if (ret)
|
|
||||||
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
|
static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
|
||||||
@ -155,11 +150,7 @@ static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
|
|||||||
if (!ret)
|
if (!ret)
|
||||||
ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
|
ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
|
||||||
key_len / 2);
|
key_len / 2);
|
||||||
if (!ret)
|
return ret;
|
||||||
return 0;
|
|
||||||
|
|
||||||
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
|
static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
|
||||||
@ -173,19 +164,12 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
|
|||||||
|
|
||||||
ret = aes_expandkey(&ctx->key1, in_key, key_len);
|
ret = aes_expandkey(&ctx->key1, in_key, key_len);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
return ret;
|
||||||
|
|
||||||
desc->tfm = ctx->hash;
|
desc->tfm = ctx->hash;
|
||||||
crypto_shash_digest(desc, in_key, key_len, digest);
|
crypto_shash_digest(desc, in_key, key_len, digest);
|
||||||
|
|
||||||
ret = aes_expandkey(&ctx->key2, digest, sizeof(digest));
|
return aes_expandkey(&ctx->key2, digest, sizeof(digest));
|
||||||
if (ret)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
out:
|
|
||||||
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
|
static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
|
||||||
@ -791,13 +775,8 @@ static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
|
|||||||
unsigned int key_len)
|
unsigned int key_len)
|
||||||
{
|
{
|
||||||
struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
|
struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
|
||||||
int err;
|
|
||||||
|
|
||||||
err = aes_expandkey(&ctx->key, in_key, key_len);
|
return aes_expandkey(&ctx->key, in_key, key_len);
|
||||||
if (err)
|
|
||||||
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
|
||||||
|
|
||||||
return err;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void cmac_gf128_mul_by_x(be128 *y, const be128 *x)
|
static void cmac_gf128_mul_by_x(be128 *y, const be128 *x)
|
||||||
|
@ -22,26 +22,26 @@
|
|||||||
#define ST5(x...) x
|
#define ST5(x...) x
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
aes_encrypt_block4x:
|
SYM_FUNC_START_LOCAL(aes_encrypt_block4x)
|
||||||
encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
|
encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
|
||||||
ret
|
ret
|
||||||
ENDPROC(aes_encrypt_block4x)
|
SYM_FUNC_END(aes_encrypt_block4x)
|
||||||
|
|
||||||
aes_decrypt_block4x:
|
SYM_FUNC_START_LOCAL(aes_decrypt_block4x)
|
||||||
decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
|
decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
|
||||||
ret
|
ret
|
||||||
ENDPROC(aes_decrypt_block4x)
|
SYM_FUNC_END(aes_decrypt_block4x)
|
||||||
|
|
||||||
#if MAX_STRIDE == 5
|
#if MAX_STRIDE == 5
|
||||||
aes_encrypt_block5x:
|
SYM_FUNC_START_LOCAL(aes_encrypt_block5x)
|
||||||
encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
|
encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
|
||||||
ret
|
ret
|
||||||
ENDPROC(aes_encrypt_block5x)
|
SYM_FUNC_END(aes_encrypt_block5x)
|
||||||
|
|
||||||
aes_decrypt_block5x:
|
SYM_FUNC_START_LOCAL(aes_decrypt_block5x)
|
||||||
decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
|
decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
|
||||||
ret
|
ret
|
||||||
ENDPROC(aes_decrypt_block5x)
|
SYM_FUNC_END(aes_decrypt_block5x)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -8,8 +8,8 @@
|
|||||||
#include <linux/linkage.h>
|
#include <linux/linkage.h>
|
||||||
#include <asm/assembler.h>
|
#include <asm/assembler.h>
|
||||||
|
|
||||||
#define AES_ENTRY(func) ENTRY(neon_ ## func)
|
#define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func)
|
||||||
#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
|
#define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func)
|
||||||
|
|
||||||
xtsmask .req v7
|
xtsmask .req v7
|
||||||
cbciv .req v7
|
cbciv .req v7
|
||||||
|
@ -380,7 +380,7 @@ ISRM0: .octa 0x0306090c00070a0d01040b0e0205080f
|
|||||||
/*
|
/*
|
||||||
* void aesbs_convert_key(u8 out[], u32 const rk[], int rounds)
|
* void aesbs_convert_key(u8 out[], u32 const rk[], int rounds)
|
||||||
*/
|
*/
|
||||||
ENTRY(aesbs_convert_key)
|
SYM_FUNC_START(aesbs_convert_key)
|
||||||
ld1 {v7.4s}, [x1], #16 // load round 0 key
|
ld1 {v7.4s}, [x1], #16 // load round 0 key
|
||||||
ld1 {v17.4s}, [x1], #16 // load round 1 key
|
ld1 {v17.4s}, [x1], #16 // load round 1 key
|
||||||
|
|
||||||
@ -425,10 +425,10 @@ ENTRY(aesbs_convert_key)
|
|||||||
eor v17.16b, v17.16b, v7.16b
|
eor v17.16b, v17.16b, v7.16b
|
||||||
str q17, [x0]
|
str q17, [x0]
|
||||||
ret
|
ret
|
||||||
ENDPROC(aesbs_convert_key)
|
SYM_FUNC_END(aesbs_convert_key)
|
||||||
|
|
||||||
.align 4
|
.align 4
|
||||||
aesbs_encrypt8:
|
SYM_FUNC_START_LOCAL(aesbs_encrypt8)
|
||||||
ldr q9, [bskey], #16 // round 0 key
|
ldr q9, [bskey], #16 // round 0 key
|
||||||
ldr q8, M0SR
|
ldr q8, M0SR
|
||||||
ldr q24, SR
|
ldr q24, SR
|
||||||
@ -488,10 +488,10 @@ aesbs_encrypt8:
|
|||||||
eor v2.16b, v2.16b, v12.16b
|
eor v2.16b, v2.16b, v12.16b
|
||||||
eor v5.16b, v5.16b, v12.16b
|
eor v5.16b, v5.16b, v12.16b
|
||||||
ret
|
ret
|
||||||
ENDPROC(aesbs_encrypt8)
|
SYM_FUNC_END(aesbs_encrypt8)
|
||||||
|
|
||||||
.align 4
|
.align 4
|
||||||
aesbs_decrypt8:
|
SYM_FUNC_START_LOCAL(aesbs_decrypt8)
|
||||||
lsl x9, rounds, #7
|
lsl x9, rounds, #7
|
||||||
add bskey, bskey, x9
|
add bskey, bskey, x9
|
||||||
|
|
||||||
@ -553,7 +553,7 @@ aesbs_decrypt8:
|
|||||||
eor v3.16b, v3.16b, v12.16b
|
eor v3.16b, v3.16b, v12.16b
|
||||||
eor v5.16b, v5.16b, v12.16b
|
eor v5.16b, v5.16b, v12.16b
|
||||||
ret
|
ret
|
||||||
ENDPROC(aesbs_decrypt8)
|
SYM_FUNC_END(aesbs_decrypt8)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
* aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
||||||
@ -621,21 +621,21 @@ ENDPROC(aesbs_decrypt8)
|
|||||||
.endm
|
.endm
|
||||||
|
|
||||||
.align 4
|
.align 4
|
||||||
ENTRY(aesbs_ecb_encrypt)
|
SYM_FUNC_START(aesbs_ecb_encrypt)
|
||||||
__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
|
__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
|
||||||
ENDPROC(aesbs_ecb_encrypt)
|
SYM_FUNC_END(aesbs_ecb_encrypt)
|
||||||
|
|
||||||
.align 4
|
.align 4
|
||||||
ENTRY(aesbs_ecb_decrypt)
|
SYM_FUNC_START(aesbs_ecb_decrypt)
|
||||||
__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
|
__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
|
||||||
ENDPROC(aesbs_ecb_decrypt)
|
SYM_FUNC_END(aesbs_ecb_decrypt)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
* aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
||||||
* int blocks, u8 iv[])
|
* int blocks, u8 iv[])
|
||||||
*/
|
*/
|
||||||
.align 4
|
.align 4
|
||||||
ENTRY(aesbs_cbc_decrypt)
|
SYM_FUNC_START(aesbs_cbc_decrypt)
|
||||||
frame_push 6
|
frame_push 6
|
||||||
|
|
||||||
mov x19, x0
|
mov x19, x0
|
||||||
@ -720,7 +720,7 @@ ENTRY(aesbs_cbc_decrypt)
|
|||||||
|
|
||||||
2: frame_pop
|
2: frame_pop
|
||||||
ret
|
ret
|
||||||
ENDPROC(aesbs_cbc_decrypt)
|
SYM_FUNC_END(aesbs_cbc_decrypt)
|
||||||
|
|
||||||
.macro next_tweak, out, in, const, tmp
|
.macro next_tweak, out, in, const, tmp
|
||||||
sshr \tmp\().2d, \in\().2d, #63
|
sshr \tmp\().2d, \in\().2d, #63
|
||||||
@ -736,7 +736,7 @@ ENDPROC(aesbs_cbc_decrypt)
|
|||||||
* aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
* aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
||||||
* int blocks, u8 iv[])
|
* int blocks, u8 iv[])
|
||||||
*/
|
*/
|
||||||
__xts_crypt8:
|
SYM_FUNC_START_LOCAL(__xts_crypt8)
|
||||||
mov x6, #1
|
mov x6, #1
|
||||||
lsl x6, x6, x23
|
lsl x6, x6, x23
|
||||||
subs w23, w23, #8
|
subs w23, w23, #8
|
||||||
@ -789,7 +789,7 @@ __xts_crypt8:
|
|||||||
0: mov bskey, x21
|
0: mov bskey, x21
|
||||||
mov rounds, x22
|
mov rounds, x22
|
||||||
br x7
|
br x7
|
||||||
ENDPROC(__xts_crypt8)
|
SYM_FUNC_END(__xts_crypt8)
|
||||||
|
|
||||||
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
|
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
|
||||||
frame_push 6, 64
|
frame_push 6, 64
|
||||||
@ -854,13 +854,13 @@ ENDPROC(__xts_crypt8)
|
|||||||
ret
|
ret
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
ENTRY(aesbs_xts_encrypt)
|
SYM_FUNC_START(aesbs_xts_encrypt)
|
||||||
__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
|
__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
|
||||||
ENDPROC(aesbs_xts_encrypt)
|
SYM_FUNC_END(aesbs_xts_encrypt)
|
||||||
|
|
||||||
ENTRY(aesbs_xts_decrypt)
|
SYM_FUNC_START(aesbs_xts_decrypt)
|
||||||
__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
|
__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
|
||||||
ENDPROC(aesbs_xts_decrypt)
|
SYM_FUNC_END(aesbs_xts_decrypt)
|
||||||
|
|
||||||
.macro next_ctr, v
|
.macro next_ctr, v
|
||||||
mov \v\().d[1], x8
|
mov \v\().d[1], x8
|
||||||
@ -874,7 +874,7 @@ ENDPROC(aesbs_xts_decrypt)
|
|||||||
* aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
|
* aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
|
||||||
* int rounds, int blocks, u8 iv[], u8 final[])
|
* int rounds, int blocks, u8 iv[], u8 final[])
|
||||||
*/
|
*/
|
||||||
ENTRY(aesbs_ctr_encrypt)
|
SYM_FUNC_START(aesbs_ctr_encrypt)
|
||||||
frame_push 8
|
frame_push 8
|
||||||
|
|
||||||
mov x19, x0
|
mov x19, x0
|
||||||
@ -1002,4 +1002,4 @@ CPU_LE( rev x8, x8 )
|
|||||||
7: cbz x25, 8b
|
7: cbz x25, 8b
|
||||||
st1 {v5.16b}, [x25]
|
st1 {v5.16b}, [x25]
|
||||||
b 8b
|
b 8b
|
||||||
ENDPROC(aesbs_ctr_encrypt)
|
SYM_FUNC_END(aesbs_ctr_encrypt)
|
||||||
|
@@ -36,7 +36,7 @@
  *
  * Clobbers: w3, x10, v4, v12
  */
-chacha_permute:
+SYM_FUNC_START_LOCAL(chacha_permute)
 
 	adr_l		x10, ROT8
 	ld1		{v12.4s}, [x10]
@@ -104,9 +104,9 @@ chacha_permute:
 	b.ne		.Ldoubleround
 
 	ret
-ENDPROC(chacha_permute)
+SYM_FUNC_END(chacha_permute)
 
-ENTRY(chacha_block_xor_neon)
+SYM_FUNC_START(chacha_block_xor_neon)
 	// x0: Input state matrix, s
 	// x1: 1 data block output, o
 	// x2: 1 data block input, i
@@ -143,9 +143,9 @@ ENTRY(chacha_block_xor_neon)
 
 	ldp		x29, x30, [sp], #16
 	ret
-ENDPROC(chacha_block_xor_neon)
+SYM_FUNC_END(chacha_block_xor_neon)
 
-ENTRY(hchacha_block_neon)
+SYM_FUNC_START(hchacha_block_neon)
 	// x0: Input state matrix, s
 	// x1: output (8 32-bit words)
 	// w2: nrounds
@@ -163,7 +163,7 @@ ENTRY(hchacha_block_neon)
 
 	ldp		x29, x30, [sp], #16
 	ret
-ENDPROC(hchacha_block_neon)
+SYM_FUNC_END(hchacha_block_neon)
 
 	a0		.req	w12
 	a1		.req	w13
@@ -183,7 +183,7 @@ ENDPROC(hchacha_block_neon)
 	a15		.req	w28
 
 	.align		6
-ENTRY(chacha_4block_xor_neon)
+SYM_FUNC_START(chacha_4block_xor_neon)
 	frame_push	10
 
 	// x0: Input state matrix, s
@@ -845,7 +845,7 @@ CPU_BE( rev a15, a15 )
 	eor		v31.16b, v31.16b, v3.16b
 	st1		{v28.16b-v31.16b}, [x1]
 	b		.Lout
-ENDPROC(chacha_4block_xor_neon)
+SYM_FUNC_END(chacha_4block_xor_neon)
 
 	.section	".rodata", "a", %progbits
 	.align		L1_CACHE_SHIFT
@@ -131,7 +131,7 @@
 	tbl		bd4.16b, {\bd\().16b}, perm4.16b
 	.endm
 
-__pmull_p8_core:
+SYM_FUNC_START_LOCAL(__pmull_p8_core)
 .L__pmull_p8_core:
 	ext		t4.8b, ad.8b, ad.8b, #1			// A1
 	ext		t5.8b, ad.8b, ad.8b, #2			// A2
@@ -194,7 +194,7 @@ __pmull_p8_core:
 	eor		t4.16b, t4.16b, t5.16b
 	eor		t6.16b, t6.16b, t3.16b
 	ret
-ENDPROC(__pmull_p8_core)
+SYM_FUNC_END(__pmull_p8_core)
 
 	.macro		__pmull_p8, rq, ad, bd, i
 	.ifnc		\bd, fold_consts
@@ -488,9 +488,9 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
 //
 // Assumes len >= 16.
 //
-ENTRY(crc_t10dif_pmull_p8)
+SYM_FUNC_START(crc_t10dif_pmull_p8)
 	crc_t10dif_pmull	p8
-ENDPROC(crc_t10dif_pmull_p8)
+SYM_FUNC_END(crc_t10dif_pmull_p8)
 
 	.align		5
 //
@@ -498,9 +498,9 @@ ENDPROC(crc_t10dif_pmull_p8)
 //
 // Assumes len >= 16.
 //
-ENTRY(crc_t10dif_pmull_p64)
+SYM_FUNC_START(crc_t10dif_pmull_p64)
 	crc_t10dif_pmull	p64
-ENDPROC(crc_t10dif_pmull_p64)
+SYM_FUNC_END(crc_t10dif_pmull_p64)
 
 	.section	".rodata", "a"
 	.align		4
@@ -350,13 +350,13 @@ CPU_LE( rev64 T1.16b, T1.16b )
  * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
  *			   struct ghash_key const *k, const char *head)
  */
-ENTRY(pmull_ghash_update_p64)
+SYM_FUNC_START(pmull_ghash_update_p64)
 	__pmull_ghash	p64
-ENDPROC(pmull_ghash_update_p64)
+SYM_FUNC_END(pmull_ghash_update_p64)
 
-ENTRY(pmull_ghash_update_p8)
+SYM_FUNC_START(pmull_ghash_update_p8)
 	__pmull_ghash	p8
-ENDPROC(pmull_ghash_update_p8)
+SYM_FUNC_END(pmull_ghash_update_p8)
 
 	KS0		.req	v8
 	KS1		.req	v9
@@ -248,10 +248,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
 {
 	struct ghash_key *key = crypto_shash_ctx(tfm);
 
-	if (keylen != GHASH_BLOCK_SIZE) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != GHASH_BLOCK_SIZE)
 		return -EINVAL;
-	}
 
 	return __ghash_setkey(key, inkey, keylen);
 }
@@ -259,7 +257,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
 static struct shash_alg ghash_alg[] = {{
 	.base.cra_name		= "ghash",
 	.base.cra_driver_name	= "ghash-neon",
-	.base.cra_priority	= 100,
+	.base.cra_priority	= 150,
 	.base.cra_blocksize	= GHASH_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct ghash_key),
 	.base.cra_module	= THIS_MODULE,
@@ -306,10 +304,8 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
 	int ret;
 
 	ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
-	if (ret) {
-		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	if (ret)
 		return -EINVAL;
-	}
 
 	aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});
 
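The same pattern repeats across the architecture drivers below: the per-transform CRYPTO_TFM_RES_BAD_KEY_LEN flag is no longer set by the driver, and a bad key length is reported only through the -EINVAL return value, now that result-flag reporting has moved into the API core. As a rough illustration (the struct and function names here are placeholders, not code from this commit), a converted ->setkey() callback reduces to something like:

```c
#include <crypto/hash.h>
#include <linux/string.h>

/* Hypothetical driver context; only the control flow mirrors the change. */
struct example_hash_ctx {
	u8 key[16];
};

static int example_setkey(struct crypto_shash *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct example_hash_ctx *ctx = crypto_shash_ctx(tfm);

	/*
	 * No crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN) here:
	 * returning -EINVAL is the only error report the API expects now.
	 */
	if (keylen != sizeof(ctx->key))
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	return 0;
}
```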
@@ -62,7 +62,7 @@
  *
  * It's guaranteed that message_len % 16 == 0.
  */
-ENTRY(nh_neon)
+SYM_FUNC_START(nh_neon)
 
 	ld1		{K0.4s,K1.4s}, [KEY], #32
 	movi		PASS0_SUMS.2d, #0
@@ -100,4 +100,4 @@ ENTRY(nh_neon)
 	addp		T1.2d, PASS2_SUMS.2d, PASS3_SUMS.2d
 	st1		{T0.16b,T1.16b}, [HASH]
 	ret
-ENDPROC(nh_neon)
+SYM_FUNC_END(nh_neon)
@@ -21,7 +21,7 @@
 asmlinkage void poly1305_init_arm64(void *state, const u8 *key);
 asmlinkage void poly1305_blocks(void *state, const u8 *src, u32 len, u32 hibit);
 asmlinkage void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
-asmlinkage void poly1305_emit(void *state, __le32 *digest, const u32 *nonce);
+asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 
@@ -162,9 +162,6 @@ EXPORT_SYMBOL(poly1305_update_arch);
 
 void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
 {
-	__le32 digest[4];
-	u64 f = 0;
-
 	if (unlikely(dctx->buflen)) {
 		dctx->buf[dctx->buflen++] = 1;
 		memset(dctx->buf + dctx->buflen, 0,
@@ -172,18 +169,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
 		poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
 	}
 
-	poly1305_emit(&dctx->h, digest, dctx->s);
-
-	/* mac = (h + s) % (2^128) */
-	f = (f >> 32) + le32_to_cpu(digest[0]);
-	put_unaligned_le32(f, dst);
-	f = (f >> 32) + le32_to_cpu(digest[1]);
-	put_unaligned_le32(f, dst + 4);
-	f = (f >> 32) + le32_to_cpu(digest[2]);
-	put_unaligned_le32(f, dst + 8);
-	f = (f >> 32) + le32_to_cpu(digest[3]);
-	put_unaligned_le32(f, dst + 12);
-
+	poly1305_emit(&dctx->h, dst, dctx->s);
 	*dctx = (struct poly1305_desc_ctx){};
 }
 EXPORT_SYMBOL(poly1305_final_arch);
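With the new prototype, poly1305_emit() writes the final 16-byte little-endian tag, including the (h + s) mod 2^128 step, straight into the caller's buffer, so the glue code no longer performs the word-by-word fold that is removed above. A minimal usage sketch of the library interface this glue backs, assuming the declarations in <crypto/poly1305.h> (the example_* name and buffers are illustrative only):

```c
#include <crypto/poly1305.h>

/* Compute one Poly1305 tag through the lib/crypto interface. */
static void example_poly1305_tag(u8 tag[POLY1305_DIGEST_SIZE],
				 const u8 key[POLY1305_KEY_SIZE],
				 const u8 *data, unsigned int len)
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);
	poly1305_update(&desc, data, len);
	/*
	 * On arm64/MIPS this ends up in poly1305_final_arch(), where
	 * poly1305_emit() now fills `tag` directly.
	 */
	poly1305_final(&desc, tag);
}
```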
@@ -65,7 +65,7 @@
  * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
  *			  int blocks)
  */
-ENTRY(sha1_ce_transform)
+SYM_FUNC_START(sha1_ce_transform)
 	frame_push	3
 
 	mov		x19, x0
@@ -160,4 +160,4 @@ CPU_LE( rev32 v11.16b, v11.16b )
 	str		dgb, [x19, #16]
 	frame_pop
 	ret
-ENDPROC(sha1_ce_transform)
+SYM_FUNC_END(sha1_ce_transform)
@@ -28,6 +28,13 @@ struct sha1_ce_state {
 asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
 				  int blocks);
 
+static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+				int blocks)
+{
+	sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst), src,
+			  blocks);
+}
+
 const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
 const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
 
@@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, __sha1_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, __sha1_ce_transform);
 	if (!finalize)
-		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+		sha1_base_do_finalize(desc, __sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
@@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_finalize(desc, __sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
@@ -75,7 +75,7 @@
  *			  int blocks)
  */
 	.text
-ENTRY(sha2_ce_transform)
+SYM_FUNC_START(sha2_ce_transform)
 	frame_push	3
 
 	mov		x19, x0
@@ -166,4 +166,4 @@ CPU_LE( rev32 v19.16b, v19.16b )
4:	st1		{dgav.4s, dgbv.4s}, [x19]
 	frame_pop
 	ret
-ENDPROC(sha2_ce_transform)
+SYM_FUNC_END(sha2_ce_transform)
@@ -28,6 +28,13 @@ struct sha256_ce_state {
 asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
 				  int blocks);
 
+static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+				int blocks)
+{
+	sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst), src,
+			  blocks);
+}
+
 const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
 					      sst.count);
 const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
@@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
 
 asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
 
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+				      int blocks)
+{
+	sha256_block_data_order(sst->state, src, blocks);
+}
+
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
@@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
@@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
 	if (!finalize)
-		sha256_base_do_finalize(desc,
-					(sha256_block_fn *)sha2_ce_transform);
+		sha256_base_do_finalize(desc, __sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
@@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
 	if (!crypto_simd_usable()) {
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_finalize(desc, __sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
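The __sha2_ce_transform()/__sha256_block_data_order() helpers added above exist so that the sha256_base_do_*() helpers receive a pointer whose type exactly matches sha256_block_fn, instead of a pointer forced through a (sha256_block_fn *) cast. A minimal sketch of the pattern, with example_* placeholder names standing in for the real assembly core and wrapper:

```c
#include <crypto/sha256_base.h>

/* Stand-in for an assembly core with its own natural argument types. */
static void example_asm_transform(u32 *digest, const u8 *src, int blocks)
{
	/* a real implementation would process blocks * 64 bytes here */
}

/*
 * Thin wrapper whose type matches sha256_block_fn exactly, so it can be
 * passed to sha256_base_do_update()/sha256_base_do_finalize() without a
 * function-pointer cast.
 */
static void __example_transform(struct sha256_state *sst, u8 const *src,
				int blocks)
{
	example_asm_transform(sst->state, src, blocks);
}
```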
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
 					unsigned int num_blks);
 EXPORT_SYMBOL(sha256_block_data_order);
 
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+				      int blocks)
+{
+	sha256_block_data_order(sst->state, src, blocks);
+}
+
 asmlinkage void sha256_block_neon(u32 *digest, const void *data,
 				  unsigned int num_blks);
 
+static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
+				int blocks)
+{
+	sha256_block_neon(sst->state, src, blocks);
+}
+
 static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
 				      unsigned int len)
 {
 	return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 }
 
 static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
 {
 	if (len)
 		sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-	sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+	sha256_base_do_finalize(desc, __sha256_block_data_order);
 
 	return sha256_base_finish(desc, out);
 }
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 
 	while (len > 0) {
 		unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 				sctx->count % SHA256_BLOCK_SIZE;
 
 		kernel_neon_begin();
-		sha256_base_do_update(desc, data, chunk,
-				      (sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
 		kernel_neon_end();
 		data += chunk;
 		len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 	} else {
 		if (len)
 			sha256_update_neon(desc, data, len);
 		kernel_neon_begin();
-		sha256_base_do_finalize(desc,
-					(sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_finalize(desc, __sha256_block_neon);
 		kernel_neon_end();
 	}
 	return sha256_base_finish(desc, out);
@@ -40,7 +40,7 @@
  * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
  */
 	.text
-ENTRY(sha3_ce_transform)
+SYM_FUNC_START(sha3_ce_transform)
 	frame_push	4
 
 	mov		x19, x0
@@ -218,7 +218,7 @@ ENTRY(sha3_ce_transform)
 	st1		{v24.1d}, [x19]
 	frame_pop
 	ret
-ENDPROC(sha3_ce_transform)
+SYM_FUNC_END(sha3_ce_transform)
 
 	.section	".rodata", "a"
 	.align		8
@@ -106,7 +106,7 @@
  *			  int blocks)
  */
 	.text
-ENTRY(sha512_ce_transform)
+SYM_FUNC_START(sha512_ce_transform)
 	frame_push	3
 
 	mov		x19, x0
@@ -216,4 +216,4 @@ CPU_LE( rev64 v19.16b, v19.16b )
3:	st1		{v8.2d-v11.2d}, [x19]
 	frame_pop
 	ret
-ENDPROC(sha512_ce_transform)
+SYM_FUNC_END(sha512_ce_transform)
@@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
 
 asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
 
+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+				      int blocks)
+{
+	sha512_block_data_order(sst->state, src, blocks);
+}
+
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
 	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
+				__sha512_block_data_order);
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+				__sha512_block_data_order);
+		sha512_base_do_finalize(desc, __sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
@@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
 	if (!crypto_simd_usable()) {
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+		sha512_base_do_finalize(desc, __sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
|
|||||||
MODULE_ALIAS_CRYPTO("sha384");
|
MODULE_ALIAS_CRYPTO("sha384");
|
||||||
MODULE_ALIAS_CRYPTO("sha512");
|
MODULE_ALIAS_CRYPTO("sha512");
|
||||||
|
|
||||||
asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
|
asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
|
||||||
unsigned int num_blks);
|
unsigned int num_blks);
|
||||||
EXPORT_SYMBOL(sha512_block_data_order);
|
EXPORT_SYMBOL(sha512_block_data_order);
|
||||||
|
|
||||||
|
static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
|
||||||
|
int blocks)
|
||||||
|
{
|
||||||
|
sha512_block_data_order(sst->state, src, blocks);
|
||||||
|
}
|
||||||
|
|
||||||
static int sha512_update(struct shash_desc *desc, const u8 *data,
|
static int sha512_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
{
|
{
|
||||||
return sha512_base_do_update(desc, data, len,
|
return sha512_base_do_update(desc, data, len,
|
||||||
(sha512_block_fn *)sha512_block_data_order);
|
__sha512_block_data_order);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha512_finup(struct shash_desc *desc, const u8 *data,
|
static int sha512_finup(struct shash_desc *desc, const u8 *data,
|
||||||
@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
|
|||||||
{
|
{
|
||||||
if (len)
|
if (len)
|
||||||
sha512_base_do_update(desc, data, len,
|
sha512_base_do_update(desc, data, len,
|
||||||
(sha512_block_fn *)sha512_block_data_order);
|
__sha512_block_data_order);
|
||||||
sha512_base_do_finalize(desc,
|
sha512_base_do_finalize(desc, __sha512_block_data_order);
|
||||||
(sha512_block_fn *)sha512_block_data_order);
|
|
||||||
|
|
||||||
return sha512_base_finish(desc, out);
|
return sha512_base_finish(desc, out);
|
||||||
}
|
}
|
||||||
|
@@ -73,7 +73,7 @@
  *			int blocks)
  */
 	.text
-ENTRY(sm3_ce_transform)
+SYM_FUNC_START(sm3_ce_transform)
 	/* load state */
 	ld1		{v8.4s-v9.4s}, [x0]
 	rev64		v8.4s, v8.4s
@@ -131,7 +131,7 @@ CPU_LE( rev32 v3.16b, v3.16b )
 	ext		v9.16b, v9.16b, v9.16b, #8
 	st1		{v8.4s-v9.4s}, [x0]
 	ret
-ENDPROC(sm3_ce_transform)
+SYM_FUNC_END(sm3_ce_transform)
 
 	.section	".rodata", "a"
 	.align		3
@@ -15,7 +15,7 @@
  * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in);
  */
 	.text
-ENTRY(sm4_ce_do_crypt)
+SYM_FUNC_START(sm4_ce_do_crypt)
 	ld1		{v8.4s}, [x2]
 	ld1		{v0.4s-v3.4s}, [x0], #64
CPU_LE(	rev32		v8.16b, v8.16b		)
@@ -33,4 +33,4 @@ CPU_LE( rev32 v8.16b, v8.16b )
CPU_LE(	rev32		v8.16b, v8.16b		)
 	st1		{v8.4s}, [x1]
 	ret
-ENDPROC(sm4_ce_do_crypt)
+SYM_FUNC_END(sm4_ce_do_crypt)
@@ -177,10 +177,8 @@ static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
 {
 	struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
 
-	if (keylen != sizeof(mctx->key)) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != sizeof(mctx->key))
 		return -EINVAL;
-	}
 	mctx->key = get_unaligned_le32(key);
 	return 0;
 }
@@ -15,7 +15,7 @@
 
 asmlinkage void poly1305_init_mips(void *state, const u8 *key);
 asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
-asmlinkage void poly1305_emit_mips(void *state, __le32 *digest, const u32 *nonce);
+asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
 
 void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
 {
@@ -134,9 +134,6 @@ EXPORT_SYMBOL(poly1305_update_arch);
 
 void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
 {
-	__le32 digest[4];
-	u64 f = 0;
-
 	if (unlikely(dctx->buflen)) {
 		dctx->buf[dctx->buflen++] = 1;
 		memset(dctx->buf + dctx->buflen, 0,
@@ -144,18 +141,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
 		poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
 	}
 
-	poly1305_emit_mips(&dctx->h, digest, dctx->s);
-
-	/* mac = (h + s) % (2^128) */
-	f = (f >> 32) + le32_to_cpu(digest[0]);
-	put_unaligned_le32(f, dst);
-	f = (f >> 32) + le32_to_cpu(digest[1]);
-	put_unaligned_le32(f, dst + 4);
-	f = (f >> 32) + le32_to_cpu(digest[2]);
-	put_unaligned_le32(f, dst + 8);
-	f = (f >> 32) + le32_to_cpu(digest[3]);
-	put_unaligned_le32(f, dst + 12);
-
+	poly1305_emit_mips(&dctx->h, dst, dctx->s);
 	*dctx = (struct poly1305_desc_ctx){};
 }
 EXPORT_SYMBOL(poly1305_final_arch);
@@ -94,13 +94,6 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 {
 	struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (key_len != AES_KEYSIZE_128 &&
-	    key_len != AES_KEYSIZE_192 &&
-	    key_len != AES_KEYSIZE_256) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-		return -EINVAL;
-	}
-
 	switch (key_len) {
 	case AES_KEYSIZE_128:
 		ctx->rounds = 4;
@@ -114,6 +107,8 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 		ctx->rounds = 6;
 		ppc_expand_key_256(ctx->key_enc, in_key);
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len);
@@ -139,13 +134,6 @@ static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 
 	key_len >>= 1;
 
-	if (key_len != AES_KEYSIZE_128 &&
-	    key_len != AES_KEYSIZE_192 &&
-	    key_len != AES_KEYSIZE_256) {
-		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-
 	switch (key_len) {
 	case AES_KEYSIZE_128:
 		ctx->rounds = 4;
@@ -162,6 +150,8 @@ static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 		ppc_expand_key_256(ctx->key_enc, in_key);
 		ppc_expand_key_256(ctx->key_twk, in_key + AES_KEYSIZE_256);
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len);
@@ -73,10 +73,8 @@ static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key,
 {
 	u32 *mctx = crypto_shash_ctx(hash);
 
-	if (keylen != sizeof(u32)) {
-		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != sizeof(u32))
 		return -EINVAL;
-	}
 	*mctx = le32_to_cpup((__le32 *)key);
 	return 0;
 }
@@ -72,19 +72,12 @@ static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
 		unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-	int ret;
 
 	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
 			CRYPTO_TFM_REQ_MASK);
 
-	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
-	if (ret) {
-		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
-				   CRYPTO_TFM_RES_MASK);
-	}
-	return ret;
+	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
 }
 
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
@@ -182,18 +175,13 @@ static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 				    unsigned int len)
 {
 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
-	int ret;
 
 	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
 				    CRYPTO_TFM_REQ_MASK);
 	crypto_skcipher_set_flags(sctx->fallback.skcipher,
 				  crypto_skcipher_get_flags(tfm) &
 				  CRYPTO_TFM_REQ_MASK);
-	ret = crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
-	crypto_skcipher_set_flags(tfm,
-				  crypto_skcipher_get_flags(sctx->fallback.skcipher) &
-				  CRYPTO_TFM_RES_MASK);
-	return ret;
+	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
 }
 
 static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
@@ -389,17 +377,12 @@ static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			       unsigned int len)
 {
 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
-	int ret;
 
 	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
 	crypto_skcipher_set_flags(xts_ctx->fallback,
 				  crypto_skcipher_get_flags(tfm) &
 				  CRYPTO_TFM_REQ_MASK);
-	ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
-	crypto_skcipher_set_flags(tfm,
-				  crypto_skcipher_get_flags(xts_ctx->fallback) &
-				  CRYPTO_TFM_RES_MASK);
-	return ret;
+	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
 }
 
 static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -414,10 +397,8 @@ static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 		return err;
 
 	/* In fips mode only 128 bit or 256 bit keys are valid */
-	if (fips_enabled && key_len != 32 && key_len != 64) {
-		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (fips_enabled && key_len != 32 && key_len != 64)
 		return -EINVAL;
-	}
 
 	/* Pick the correct function code based on the key length */
 	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
@@ -111,10 +111,8 @@ static int crc32_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
 {
 	struct crc_ctx *mctx = crypto_shash_ctx(tfm);
 
-	if (newkeylen != sizeof(mctx->key)) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (newkeylen != sizeof(mctx->key))
 		return -EINVAL;
-	}
 	mctx->key = le32_to_cpu(*(__le32 *)newkey);
 	return 0;
 }
@@ -124,10 +122,8 @@ static int crc32be_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
 {
 	struct crc_ctx *mctx = crypto_shash_ctx(tfm);
 
-	if (newkeylen != sizeof(mctx->key)) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (newkeylen != sizeof(mctx->key))
 		return -EINVAL;
-	}
 	mctx->key = be32_to_cpu(*(__be32 *)newkey);
 	return 0;
 }
@@ -43,10 +43,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
 {
 	struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
 
-	if (keylen != GHASH_BLOCK_SIZE) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != GHASH_BLOCK_SIZE)
 		return -EINVAL;
-	}
 
 	memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
 
@@ -151,11 +151,7 @@ static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	if (rc)
 		return rc;
 
-	if (__paes_set_key(ctx)) {
-		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-	return 0;
+	return __paes_set_key(ctx);
 }
 
 static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
@@ -254,11 +250,7 @@ static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	if (rc)
 		return rc;
 
-	if (__cbc_paes_set_key(ctx)) {
-		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-	return 0;
+	return __cbc_paes_set_key(ctx);
 }
 
 static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
@@ -386,10 +378,9 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	if (rc)
 		return rc;
 
-	if (__xts_paes_set_key(ctx)) {
-		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
+	rc = __xts_paes_set_key(ctx);
+	if (rc)
+		return rc;
 
 	/*
 	 * xts_check_key verifies the key length is not odd and makes
@@ -526,11 +517,7 @@ static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	if (rc)
 		return rc;
 
-	if (__ctr_paes_set_key(ctx)) {
-		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-	return 0;
+	return __ctr_paes_set_key(ctx);
 }
 
 static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
@@ -169,7 +169,6 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		       unsigned int key_len)
 {
 	struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
 
 	switch (key_len) {
 	case AES_KEYSIZE_128:
@@ -188,7 +187,6 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		break;
 
 	default:
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
 
@@ -39,12 +39,9 @@ static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key,
 {
 	struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
 	const u32 *in_key = (const u32 *) _in_key;
-	u32 *flags = &tfm->crt_flags;
 
-	if (key_len != 16 && key_len != 24 && key_len != 32) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	if (key_len != 16 && key_len != 24 && key_len != 32)
 		return -EINVAL;
-	}
 
 	ctx->key_len = key_len;
 
@@ -33,10 +33,8 @@ static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key,
 {
 	u32 *mctx = crypto_shash_ctx(hash);
 
-	if (keylen != sizeof(u32)) {
-		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != sizeof(u32))
 		return -EINVAL;
-	}
 	*(__le32 *)mctx = le32_to_cpup((__le32 *)key);
 	return 0;
 }
arch/x86/crypto/.gitignore (new file, 1 line, vendored)
@@ -0,0 +1 @@
+poly1305-x86_64-cryptogams.S
@@ -73,6 +73,10 @@ aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
 
 nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
 blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
+poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
+ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),)
+targets += poly1305-x86_64-cryptogams.S
+endif
 
 ifeq ($(avx_supported),yes)
 	camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
@@ -101,10 +105,8 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
 aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
-poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o
 ifeq ($(avx2_supported),yes)
 sha1-ssse3-y += sha1_avx2_x86_64_asm.o
-poly1305-x86_64-y += poly1305-avx2-x86_64.o
 endif
 ifeq ($(sha1_ni_supported),yes)
 sha1-ssse3-y += sha1_ni_asm.o
@@ -118,3 +120,8 @@ sha256-ssse3-y += sha256_ni_asm.o
 endif
 sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
 crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
+
+quiet_cmd_perlasm = PERLASM $@
+      cmd_perlasm = $(PERL) $< > $@
+$(obj)/%.S: $(src)/%.pl FORCE
+	$(call if_changed,perlasm)
@@ -144,10 +144,8 @@ static int crypto_aegis128_aesni_setkey(struct crypto_aead *aead, const u8 *key,
 {
 	struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(aead);
 
-	if (keylen != AEGIS128_KEY_SIZE) {
-		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != AEGIS128_KEY_SIZE)
 		return -EINVAL;
-	}
 
 	memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE);
 
@@ -1942,7 +1942,7 @@ SYM_FUNC_START(aesni_set_key)
 SYM_FUNC_END(aesni_set_key)
 
 /*
- * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_enc(const void *ctx, u8 *dst, const u8 *src)
  */
 SYM_FUNC_START(aesni_enc)
 	FRAME_BEGIN
@@ -2131,7 +2131,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
 SYM_FUNC_END(_aesni_enc4)
 
 /*
- * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_dec (const void *ctx, u8 *dst, const u8 *src)
  */
 SYM_FUNC_START(aesni_dec)
 	FRAME_BEGIN
@@ -2716,8 +2716,8 @@ SYM_FUNC_END(aesni_ctr_enc)
 	pxor CTR, IV;
 
 /*
- * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
- *			 bool enc, u8 *iv)
+ * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst,
+ *			 const u8 *src, bool enc, le128 *iv)
  */
 SYM_FUNC_START(aesni_xts_crypt8)
 	FRAME_BEGIN
@@ -83,10 +83,8 @@ struct gcm_context_data {
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 			     unsigned int key_len);
-asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
-			  const u8 *in);
-asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
-			  const u8 *in);
+asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len);
 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
@@ -106,8 +104,8 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
-asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
-				 const u8 *in, bool enc, u8 *iv);
+asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
+				 const u8 *in, bool enc, le128 *iv);
 
 /* asmlinkage void aesni_gcm_enc()
  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
@@ -318,14 +316,11 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 			      const u8 *in_key, unsigned int key_len)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
-	u32 *flags = &tfm->crt_flags;
 	int err;
 
 	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
-	    key_len != AES_KEYSIZE_256) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	    key_len != AES_KEYSIZE_256)
 		return -EINVAL;
-	}
 
 	if (!crypto_simd_usable())
 		err = aes_expandkey(ctx, in_key, key_len);
@@ -550,29 +545,24 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 }
 
 
-static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
+static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	aesni_enc(ctx, out, in);
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
 }
 
-static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
 }
 
-static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
+	aesni_xts_crypt8(ctx, dst, src, true, iv);
 }
 
-static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
-}
-
-static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
-{
-	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
+	aesni_xts_crypt8(ctx, dst, src, false, iv);
 }
 
 static const struct common_glue_ctx aesni_enc_xts = {
@@ -581,10 +571,10 @@ static const struct common_glue_ctx aesni_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = 8,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
+		.fn_u = { .xts = aesni_xts_enc8 }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
+		.fn_u = { .xts = aesni_xts_enc }
 	} }
 };
 
@@ -594,10 +584,10 @@ static const struct common_glue_ctx aesni_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = 8,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
+		.fn_u = { .xts = aesni_xts_dec8 }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
+		.fn_u = { .xts = aesni_xts_dec }
 	} }
 };
 
@@ -606,8 +596,7 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&aesni_enc_xts, req,
-				   XTS_TWEAK_CAST(aesni_xts_tweak),
+	return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
 				   aes_ctx(ctx->raw_tweak_ctx),
 				   aes_ctx(ctx->raw_crypt_ctx),
 				   false);
@@ -618,8 +607,7 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&aesni_dec_xts, req,
-				   XTS_TWEAK_CAST(aesni_xts_tweak),
+	return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
 				   aes_ctx(ctx->raw_tweak_ctx),
 				   aes_ctx(ctx->raw_crypt_ctx),
 				   true);
@@ -650,10 +638,9 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
 {
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
 
-	if (key_len < 4) {
-		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (key_len < 4)
 		return -EINVAL;
-	}
 	/*Account for 4 byte nonce at the end.*/
 	key_len -= 4;
 
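The x86 glue changes above follow the same idea as the arm64 SHA wrappers: instead of forcing mismatched function pointers through GLUE_FUNC_CAST()/GLUE_XTS_FUNC_CAST()/XTS_TWEAK_CAST(), the helpers are given prototypes that already match the common_glue_ctx callback types (const void *ctx, u8 *dst, const u8 *src, ...), which keeps type checks on indirect calls happy. A rough, self-contained sketch of that calling convention, with example_* placeholder names rather than the kernel's own glue types:

```c
#include <linux/types.h>

/* Callback type modeled on the converted glue helpers. */
typedef void (*example_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);

/* Stand-ins for the SIMD routines; they already have the matching type. */
static void example_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src) { }
static void example_ecb_enc_one(const void *ctx, u8 *dst, const u8 *src) { }

struct example_glue_entry {
	unsigned int num_blocks;
	example_glue_func_t fn;
};

/* No cast macros needed: the initializers type-check as written. */
static const struct example_glue_entry example_ecb_funcs[] = {
	{ .num_blocks = 8, .fn = example_ecb_enc_8way },
	{ .num_blocks = 1, .fn = example_ecb_enc_one },
};
```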
@@ -64,10 +64,8 @@ static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
 {
 	struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
 
-	if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
 		return -EINVAL;
-	}
 
 	memcpy(tctx->key, key, keylen);
 	tctx->keylen = keylen;
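
The two setkey hunks above follow the pattern the rest of this series applies: with the CRYPTO_TFM_RES flags gone from the API, a bad key length is reported purely through the return value. As a rough illustration only (the example_* names and key size are made up, not part of this series), a shash ->setkey now reduces to:

	#include <crypto/internal/hash.h>
	#include <linux/string.h>

	#define EXAMPLE_KEY_SIZE 32		/* hypothetical key size */

	struct example_tfm_ctx {
		u8 key[EXAMPLE_KEY_SIZE];
	};

	static int example_setkey(struct crypto_shash *tfm, const u8 *key,
				  unsigned int keylen)
	{
		struct example_tfm_ctx *tctx = crypto_shash_ctx(tfm);

		/*
		 * No crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN)
		 * anymore: -EINVAL alone tells the caller the key was rejected.
		 */
		if (keylen != EXAMPLE_KEY_SIZE)
			return -EINVAL;

		memcpy(tctx->key, key, keylen);
		return 0;
	}
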
@@ -19,20 +19,17 @@
 #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
 
 /* 32-way AVX2/AES-NI parallel cipher functions */
-asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
-asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
+asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src);
 
-asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
-asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst,
-				   const u8 *src, le128 *iv);
+asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src,
+				   le128 *iv);
 
-asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src, le128 *iv);
-asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src,
+				       le128 *iv);
+asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src,
+				       le128 *iv);
 
 static const struct common_glue_ctx camellia_enc = {
 	.num_funcs = 4,
@@ -40,16 +37,16 @@ static const struct common_glue_ctx camellia_enc = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_32way) }
+		.fn_u = { .ecb = camellia_ecb_enc_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+		.fn_u = { .ecb = camellia_ecb_enc_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+		.fn_u = { .ecb = camellia_enc_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+		.fn_u = { .ecb = camellia_enc_blk }
 	} }
 };
 
@@ -59,16 +56,16 @@ static const struct common_glue_ctx camellia_ctr = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_32way) }
+		.fn_u = { .ctr = camellia_ctr_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+		.fn_u = { .ctr = camellia_ctr_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+		.fn_u = { .ctr = camellia_crypt_ctr_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+		.fn_u = { .ctr = camellia_crypt_ctr }
 	} }
 };
 
@@ -78,13 +75,13 @@ static const struct common_glue_ctx camellia_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_32way) }
+		.fn_u = { .xts = camellia_xts_enc_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) }
+		.fn_u = { .xts = camellia_xts_enc_16way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) }
+		.fn_u = { .xts = camellia_xts_enc }
 	} }
 };
 
@@ -94,16 +91,16 @@ static const struct common_glue_ctx camellia_dec = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_32way) }
+		.fn_u = { .ecb = camellia_ecb_dec_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+		.fn_u = { .ecb = camellia_ecb_dec_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+		.fn_u = { .ecb = camellia_dec_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .ecb = camellia_dec_blk }
 	} }
 };
 
@@ -113,16 +110,16 @@ static const struct common_glue_ctx camellia_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_32way) }
+		.fn_u = { .cbc = camellia_cbc_dec_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+		.fn_u = { .cbc = camellia_cbc_dec_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+		.fn_u = { .cbc = camellia_decrypt_cbc_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .cbc = camellia_dec_blk }
 	} }
 };
 
@@ -132,21 +129,20 @@ static const struct common_glue_ctx camellia_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_32way) }
+		.fn_u = { .xts = camellia_xts_dec_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) }
+		.fn_u = { .xts = camellia_xts_dec_16way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) }
+		.fn_u = { .xts = camellia_xts_dec }
 	} }
 };
 
 static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			   unsigned int keylen)
 {
-	return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
-				 &tfm->base.crt_flags);
+	return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen);
 }
 
 static int ecb_encrypt(struct skcipher_request *req)
@@ -161,8 +157,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-					   req);
+	return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -180,8 +175,7 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&camellia_enc_xts, req,
-				   XTS_TWEAK_CAST(camellia_enc_blk),
+	return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
@@ -190,8 +184,7 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&camellia_dec_xts, req,
-				   XTS_TWEAK_CAST(camellia_enc_blk),
+	return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
 
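
The camellia glue hunks above can drop every GLUE_FUNC_CAST()/GLUE_CTR_FUNC_CAST()/GLUE_XTS_FUNC_CAST() wrapper because the assembly stubs are re-declared with the prototype that the glue helper's function-pointer union expects. Roughly (paraphrased for illustration, not quoted from the header), the shared pointer types end up looking like:

	typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
	typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
	typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src,
					       le128 *iv);
	typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src,
					       le128 *iv);

so a stub such as camellia_ecb_enc_32way() can be assigned to .fn_u.ecb directly, and prototype mismatches are no longer hidden from the compiler by a cast.
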
@@ -18,41 +18,36 @@
 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
 
 /* 16-way parallel cipher functions (avx/aes-ni) */
-asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
+asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way);
 
-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way);
 
-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
+asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way);
 
-asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
-				   const u8 *src, le128 *iv);
+asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
+				   le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_ctr_16way);
 
-asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+				       le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_xts_enc_16way);
 
-asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+				       le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_xts_dec_16way);
 
-void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(camellia_enc_blk));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk);
 }
 EXPORT_SYMBOL_GPL(camellia_xts_enc);
 
-void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(camellia_dec_blk));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk);
 }
 EXPORT_SYMBOL_GPL(camellia_xts_dec);
 
@@ -62,13 +57,13 @@ static const struct common_glue_ctx camellia_enc = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+		.fn_u = { .ecb = camellia_ecb_enc_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+		.fn_u = { .ecb = camellia_enc_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+		.fn_u = { .ecb = camellia_enc_blk }
 	} }
 };
 
@@ -78,13 +73,13 @@ static const struct common_glue_ctx camellia_ctr = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+		.fn_u = { .ctr = camellia_ctr_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+		.fn_u = { .ctr = camellia_crypt_ctr_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+		.fn_u = { .ctr = camellia_crypt_ctr }
 	} }
 };
 
@@ -94,10 +89,10 @@ static const struct common_glue_ctx camellia_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) }
+		.fn_u = { .xts = camellia_xts_enc_16way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) }
+		.fn_u = { .xts = camellia_xts_enc }
 	} }
 };
 
@@ -107,13 +102,13 @@ static const struct common_glue_ctx camellia_dec = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+		.fn_u = { .ecb = camellia_ecb_dec_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+		.fn_u = { .ecb = camellia_dec_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .ecb = camellia_dec_blk }
 	} }
 };
 
@@ -123,13 +118,13 @@ static const struct common_glue_ctx camellia_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+		.fn_u = { .cbc = camellia_cbc_dec_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+		.fn_u = { .cbc = camellia_decrypt_cbc_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .cbc = camellia_dec_blk }
 	} }
 };
 
@@ -139,18 +134,17 @@ static const struct common_glue_ctx camellia_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) }
+		.fn_u = { .xts = camellia_xts_dec_16way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) }
+		.fn_u = { .xts = camellia_xts_dec }
 	} }
 };
 
 static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			   unsigned int keylen)
 {
-	return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
-				 &tfm->base.crt_flags);
+	return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen);
 }
 
 static int ecb_encrypt(struct skcipher_request *req)
@@ -165,8 +159,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-					   req);
+	return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -183,7 +176,6 @@ int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			unsigned int keylen)
 {
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-	u32 *flags = &tfm->base.crt_flags;
 	int err;
 
 	err = xts_verify_key(tfm, key, keylen);
@@ -191,13 +183,12 @@ int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
 		return err;
 
 	/* first half of xts-key is for crypt */
-	err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+	err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2);
 	if (err)
 		return err;
 
 	/* second half of xts-key is for tweak */
-	return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
-				 flags);
+	return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
 }
 EXPORT_SYMBOL_GPL(xts_camellia_setkey);
 
@@ -206,8 +197,7 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&camellia_enc_xts, req,
-				   XTS_TWEAK_CAST(camellia_enc_blk),
+	return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
@@ -216,8 +206,7 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&camellia_dec_xts, req,
-				   XTS_TWEAK_CAST(camellia_enc_blk),
+	return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
 
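
xts_camellia_setkey() above keeps the usual XTS key-handling convention: validate the combined key with xts_verify_key() first, then split it in half, with the first half keying the data cipher and the second half the tweak cipher. A condensed sketch of that shape, with made-up example_* types and a hypothetical example_setkey() standing in for __camellia_setkey():

	#include <crypto/internal/skcipher.h>
	#include <crypto/xts.h>

	struct example_ctx { u32 key_enc[64]; };	/* hypothetical expanded-key state */

	struct example_xts_ctx {
		struct example_ctx crypt_ctx;
		struct example_ctx tweak_ctx;
	};

	/* Stand-in for a real per-cipher key expansion routine. */
	static int example_setkey(struct example_ctx *c, const u8 *key,
				  unsigned int keylen);

	static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
				      unsigned int keylen)
	{
		struct example_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
		int err;

		/* Validate the combined key before it is split. */
		err = xts_verify_key(tfm, key, keylen);
		if (err)
			return err;

		/* first half of xts-key is for crypt */
		err = example_setkey(&ctx->crypt_ctx, key, keylen / 2);
		if (err)
			return err;

		/* second half of xts-key is for tweak */
		return example_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
	}
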
@@ -18,19 +18,17 @@
 #include <asm/crypto/glue_helper.h>
 
 /* regular block cipher functions */
-asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
-				   const u8 *src, bool xor);
+asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
+				   bool xor);
 EXPORT_SYMBOL_GPL(__camellia_enc_blk);
-asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
-				 const u8 *src);
+asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_dec_blk);
 
 /* 2-way parallel cipher functions */
-asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
-					const u8 *src, bool xor);
+asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
+					bool xor);
 EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way);
-asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
-				      const u8 *src);
+asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_dec_blk_2way);
 
 static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -1229,12 +1227,10 @@ static void camellia_setup192(const unsigned char *key, u64 *subkey)
 }
 
 int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
-		      unsigned int key_len, u32 *flags)
+		      unsigned int key_len)
 {
-	if (key_len != 16 && key_len != 24 && key_len != 32) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	if (key_len != 16 && key_len != 24 && key_len != 32)
 		return -EINVAL;
-	}
 
 	cctx->key_length = key_len;
 
@@ -1257,8 +1253,7 @@ EXPORT_SYMBOL_GPL(__camellia_setkey);
 static int camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
 			   unsigned int key_len)
 {
-	return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len,
-				 &tfm->crt_flags);
+	return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len);
 }
 
 static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
@@ -1267,8 +1262,10 @@ static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 	return camellia_setkey(&tfm->base, key, key_len);
 }
 
-void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
+void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s)
 {
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 	u128 iv = *src;
 
 	camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src);
@@ -1277,9 +1274,11 @@ void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
 }
 EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way);
 
-void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
 	be128 ctrblk;
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 
 	if (dst != src)
 		*dst = *src;
@@ -1291,9 +1290,11 @@ void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 }
 EXPORT_SYMBOL_GPL(camellia_crypt_ctr);
 
-void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
 	be128 ctrblks[2];
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 
 	if (dst != src) {
 		dst[0] = src[0];
@@ -1315,10 +1316,10 @@ static const struct common_glue_ctx camellia_enc = {
 
 	.funcs = { {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+		.fn_u = { .ecb = camellia_enc_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+		.fn_u = { .ecb = camellia_enc_blk }
 	} }
 };
 
@@ -1328,10 +1329,10 @@ static const struct common_glue_ctx camellia_ctr = {
 
 	.funcs = { {
 		.num_blocks = 2,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+		.fn_u = { .ctr = camellia_crypt_ctr_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+		.fn_u = { .ctr = camellia_crypt_ctr }
 	} }
 };
 
@@ -1341,10 +1342,10 @@ static const struct common_glue_ctx camellia_dec = {
 
 	.funcs = { {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+		.fn_u = { .ecb = camellia_dec_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .ecb = camellia_dec_blk }
 	} }
 };
 
@@ -1354,10 +1355,10 @@ static const struct common_glue_ctx camellia_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = 2,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+		.fn_u = { .cbc = camellia_decrypt_cbc_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .cbc = camellia_dec_blk }
 	} }
 };
 
@@ -1373,8 +1374,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-					   req);
+	return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -20,20 +20,17 @@
 
 #define CAST6_PARALLEL_BLOCKS 8
 
-asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src);
-asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src);
+asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
 
-asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src);
-asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src,
+asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
 			       le128 *iv);
 
-asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src, le128 *iv);
-asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src, le128 *iv);
+asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
+				   le128 *iv);
+asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
+				   le128 *iv);
 
 static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
 				 const u8 *key, unsigned int keylen)
@@ -41,21 +38,21 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
 	return cast6_setkey(&tfm->base, key, keylen);
 }
 
-static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(__cast6_encrypt));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt);
 }
 
-static void cast6_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(__cast6_decrypt));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt);
 }
 
-static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
 	be128 ctrblk;
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 
 	le128_to_be128(&ctrblk, iv);
 	le128_inc(iv);
@@ -70,10 +67,10 @@ static const struct common_glue_ctx cast6_enc = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_enc_8way) }
+		.fn_u = { .ecb = cast6_ecb_enc_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) }
+		.fn_u = { .ecb = __cast6_encrypt }
 	} }
 };
 
@@ -83,10 +80,10 @@ static const struct common_glue_ctx cast6_ctr = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_ctr_8way) }
+		.fn_u = { .ctr = cast6_ctr_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) }
+		.fn_u = { .ctr = cast6_crypt_ctr }
 	} }
 };
 
@@ -96,10 +93,10 @@ static const struct common_glue_ctx cast6_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc_8way) }
+		.fn_u = { .xts = cast6_xts_enc_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc) }
+		.fn_u = { .xts = cast6_xts_enc }
 	} }
 };
 
@@ -109,10 +106,10 @@ static const struct common_glue_ctx cast6_dec = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_dec_8way) }
+		.fn_u = { .ecb = cast6_ecb_dec_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) }
+		.fn_u = { .ecb = __cast6_decrypt }
 	} }
 };
 
@@ -122,10 +119,10 @@ static const struct common_glue_ctx cast6_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_cbc_dec_8way) }
+		.fn_u = { .cbc = cast6_cbc_dec_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) }
+		.fn_u = { .cbc = __cast6_decrypt }
 	} }
 };
 
@@ -135,10 +132,10 @@ static const struct common_glue_ctx cast6_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec_8way) }
+		.fn_u = { .xts = cast6_xts_dec_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec) }
+		.fn_u = { .xts = cast6_xts_dec }
 	} }
 };
 
@@ -154,8 +151,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt),
-					   req);
+	return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -177,7 +173,6 @@ static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			    unsigned int keylen)
 {
 	struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-	u32 *flags = &tfm->base.crt_flags;
 	int err;
 
 	err = xts_verify_key(tfm, key, keylen);
@@ -185,13 +180,12 @@ static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
 		return err;
 
 	/* first half of xts-key is for crypt */
-	err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+	err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2);
 	if (err)
 		return err;
 
 	/* second half of xts-key is for tweak */
-	return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
-			      flags);
+	return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
 }
 
 static int xts_encrypt(struct skcipher_request *req)
@@ -199,8 +193,7 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&cast6_enc_xts, req,
-				   XTS_TWEAK_CAST(__cast6_encrypt),
+	return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
@@ -209,8 +202,7 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&cast6_dec_xts, req,
-				   XTS_TWEAK_CAST(__cast6_encrypt),
+	return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
 
@@ -94,10 +94,8 @@ static int crc32_pclmul_setkey(struct crypto_shash *hash, const u8 *key,
 {
 	u32 *mctx = crypto_shash_ctx(hash);
 
-	if (keylen != sizeof(u32)) {
-		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != sizeof(u32))
 		return -EINVAL;
-	}
 	*mctx = le32_to_cpup((__le32 *)key);
 	return 0;
 }
@@ -91,10 +91,8 @@ static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key,
 {
 	u32 *mctx = crypto_shash_ctx(hash);
 
-	if (keylen != sizeof(u32)) {
-		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != sizeof(u32))
 		return -EINVAL;
-	}
 	*mctx = le32_to_cpup((__le32 *)key);
 	return 0;
 }
@@ -57,10 +57,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
 	be128 *x = (be128 *)key;
 	u64 a, b;
 
-	if (keylen != GHASH_BLOCK_SIZE) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != GHASH_BLOCK_SIZE)
 		return -EINVAL;
-	}
 
 	/* perform multiplication by 'x' in GF(2^128) */
 	a = be64_to_cpu(x->a);
@@ -257,16 +255,11 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 {
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct crypto_ahash *child = &ctx->cryptd_tfm->base;
-	int err;
 
 	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 	crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
 			       & CRYPTO_TFM_REQ_MASK);
-	err = crypto_ahash_setkey(child, key, keylen);
-	crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
-			       & CRYPTO_TFM_RES_MASK);
-
-	return err;
+	return crypto_ahash_setkey(child, key, keylen);
 }
 
 static int ghash_async_init_tfm(struct crypto_tfm *tfm)
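
The ghash_async_setkey() hunk above shows the other side of dropping CRYPTO_TFM_RES_*: a wrapper tfm no longer has to copy result flags back from the inner tfm, it simply propagates the child's return value. Schematically (the example_* names are illustrative only, modelled on the new ghash code):

	#include <crypto/internal/hash.h>
	#include <crypto/cryptd.h>

	struct example_async_ctx {
		struct cryptd_ahash *cryptd_tfm;	/* hypothetical wrapper context */
	};

	static int example_async_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
	{
		struct example_async_ctx *ctx = crypto_ahash_ctx(tfm);
		struct crypto_ahash *child = &ctx->cryptd_tfm->base;

		/* Forward only the request flags; result flags no longer exist. */
		crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
		crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
		return crypto_ahash_setkey(child, key, keylen);
	}
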
@@ -134,7 +134,8 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
 			src -= num_blocks - 1;
 			dst -= num_blocks - 1;
 
-			gctx->funcs[i].fn_u.cbc(ctx, dst, src);
+			gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
+						(const u8 *)src);
 
 			nbytes -= func_bytes;
 			if (nbytes < bsize)
@@ -188,7 +189,9 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
 
 		/* Process multi-block batch */
 		do {
-			gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
+			gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
+						(const u8 *)src,
+						&ctrblk);
 			src += num_blocks;
 			dst += num_blocks;
 			nbytes -= func_bytes;
@@ -210,7 +213,8 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
 
 		be128_to_le128(&ctrblk, (be128 *)walk.iv);
 		memcpy(&tmp, walk.src.virt.addr, nbytes);
-		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
+		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
+							  (const u8 *)&tmp,
 							  &ctrblk);
 		memcpy(walk.dst.virt.addr, &tmp, nbytes);
 		le128_to_be128((be128 *)walk.iv, &ctrblk);
@@ -240,7 +244,8 @@ static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
 
 		if (nbytes >= func_bytes) {
 			do {
-				gctx->funcs[i].fn_u.xts(ctx, dst, src,
+				gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst,
+							(const u8 *)src,
 							walk->iv);
 
 				src += num_blocks;
@@ -354,8 +359,8 @@ out:
 }
 EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
 
-void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
-			       common_glue_func_t fn)
+void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src,
+			       le128 *iv, common_glue_func_t fn)
 {
 	le128 ivblk = *iv;
 
@@ -363,13 +368,13 @@ void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
 	gf128mul_x_ble(iv, &ivblk);
 
 	/* CC <- T xor C */
-	u128_xor(dst, src, (u128 *)&ivblk);
+	u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk);
 
 	/* PP <- D(Key2,CC) */
-	fn(ctx, (u8 *)dst, (u8 *)dst);
+	fn(ctx, dst, dst);
 
 	/* P <- T xor PP */
-	u128_xor(dst, dst, (u128 *)&ivblk);
+	u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk);
 }
 EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
 
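
Several helpers in the hunks above (camellia_crypt_ctr(), cast6_crypt_ctr(), and the fn_u callbacks in glue_helper.c) now exchange plain u8 pointers and cast back to u128 internally, so every callback shares one prototype while the 128-bit counter/XOR math is unchanged. A stripped-down sketch of that wrapper shape, closely modelled on the camellia CTR helper shown earlier (example_encrypt() is a stand-in, not a real symbol):

	#include <crypto/b128ops.h>
	#include <asm/crypto/glue_helper.h>

	/* Hypothetical single-block cipher primitive with the shared prototype. */
	static void example_encrypt(const void *ctx, u8 *dst, const u8 *src);

	static void example_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
	{
		be128 ctrblk;
		u128 *dst = (u128 *)d;
		const u128 *src = (const u128 *)s;

		if (dst != src)
			*dst = *src;

		/* Encrypt the big-endian counter, then XOR it into the block. */
		le128_to_be128(&ctrblk, iv);
		le128_inc(iv);
		example_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
		u128_xor(dst, dst, (u128 *)&ctrblk);
	}
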
@@ -1,390 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
||||||
/*
|
|
||||||
* Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
|
|
||||||
*
|
|
||||||
* Copyright (C) 2015 Martin Willi
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <linux/linkage.h>
|
|
||||||
|
|
||||||
.section .rodata.cst32.ANMASK, "aM", @progbits, 32
|
|
||||||
.align 32
|
|
||||||
ANMASK: .octa 0x0000000003ffffff0000000003ffffff
|
|
||||||
.octa 0x0000000003ffffff0000000003ffffff
|
|
||||||
|
|
||||||
.section .rodata.cst32.ORMASK, "aM", @progbits, 32
|
|
||||||
.align 32
|
|
||||||
ORMASK: .octa 0x00000000010000000000000001000000
|
|
||||||
.octa 0x00000000010000000000000001000000
|
|
||||||
|
|
||||||
.text
|
|
||||||
|
|
||||||
#define h0 0x00(%rdi)
|
|
||||||
#define h1 0x04(%rdi)
|
|
||||||
#define h2 0x08(%rdi)
|
|
||||||
#define h3 0x0c(%rdi)
|
|
||||||
#define h4 0x10(%rdi)
|
|
||||||
#define r0 0x00(%rdx)
|
|
||||||
#define r1 0x04(%rdx)
|
|
||||||
#define r2 0x08(%rdx)
|
|
||||||
#define r3 0x0c(%rdx)
|
|
||||||
#define r4 0x10(%rdx)
|
|
||||||
#define u0 0x00(%r8)
|
|
||||||
#define u1 0x04(%r8)
|
|
||||||
#define u2 0x08(%r8)
|
|
||||||
#define u3 0x0c(%r8)
|
|
||||||
#define u4 0x10(%r8)
|
|
||||||
#define w0 0x14(%r8)
|
|
||||||
#define w1 0x18(%r8)
|
|
||||||
#define w2 0x1c(%r8)
|
|
||||||
#define w3 0x20(%r8)
|
|
||||||
#define w4 0x24(%r8)
|
|
||||||
#define y0 0x28(%r8)
|
|
||||||
#define y1 0x2c(%r8)
|
|
||||||
#define y2 0x30(%r8)
|
|
||||||
#define y3 0x34(%r8)
|
|
||||||
#define y4 0x38(%r8)
|
|
||||||
#define m %rsi
|
|
||||||
#define hc0 %ymm0
|
|
||||||
#define hc1 %ymm1
|
|
||||||
#define hc2 %ymm2
|
|
||||||
#define hc3 %ymm3
|
|
||||||
#define hc4 %ymm4
|
|
||||||
#define hc0x %xmm0
|
|
||||||
#define hc1x %xmm1
|
|
||||||
#define hc2x %xmm2
|
|
||||||
#define hc3x %xmm3
|
|
||||||
#define hc4x %xmm4
|
|
||||||
#define t1 %ymm5
|
|
||||||
#define t2 %ymm6
|
|
||||||
#define t1x %xmm5
|
|
||||||
#define t2x %xmm6
|
|
||||||
#define ruwy0 %ymm7
|
|
||||||
#define ruwy1 %ymm8
|
|
||||||
#define ruwy2 %ymm9
|
|
||||||
#define ruwy3 %ymm10
|
|
||||||
#define ruwy4 %ymm11
|
|
||||||
#define ruwy0x %xmm7
|
|
||||||
#define ruwy1x %xmm8
|
|
||||||
#define ruwy2x %xmm9
|
|
||||||
#define ruwy3x %xmm10
|
|
||||||
#define ruwy4x %xmm11
|
|
||||||
#define svxz1 %ymm12
|
|
||||||
#define svxz2 %ymm13
|
|
||||||
#define svxz3 %ymm14
|
|
||||||
#define svxz4 %ymm15
|
|
||||||
#define d0 %r9
|
|
||||||
#define d1 %r10
|
|
||||||
#define d2 %r11
|
|
||||||
#define d3 %r12
|
|
||||||
#define d4 %r13
|
|
||||||
|
|
||||||
SYM_FUNC_START(poly1305_4block_avx2)
|
|
||||||
# %rdi: Accumulator h[5]
|
|
||||||
# %rsi: 64 byte input block m
|
|
||||||
# %rdx: Poly1305 key r[5]
|
|
||||||
# %rcx: Quadblock count
|
|
||||||
# %r8: Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5],
|
|
||||||
|
|
||||||
# This four-block variant uses loop unrolled block processing. It
|
|
||||||
# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
|
|
||||||
# h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
|
|
||||||
|
|
||||||
vzeroupper
|
|
||||||
push %rbx
|
|
||||||
push %r12
|
|
||||||
push %r13
|
|
||||||
|
|
||||||
# combine r0,u0,w0,y0
|
|
||||||
vmovd y0,ruwy0x
|
|
||||||
vmovd w0,t1x
|
|
||||||
vpunpcklqdq t1,ruwy0,ruwy0
|
|
||||||
vmovd u0,t1x
|
|
||||||
vmovd r0,t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,ruwy0,ruwy0
|
|
||||||
|
|
||||||
# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
|
|
||||||
vmovd y1,ruwy1x
|
|
||||||
vmovd w1,t1x
|
|
||||||
vpunpcklqdq t1,ruwy1,ruwy1
|
|
||||||
vmovd u1,t1x
|
|
||||||
vmovd r1,t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,ruwy1,ruwy1
|
|
||||||
vpslld $2,ruwy1,svxz1
|
|
||||||
vpaddd ruwy1,svxz1,svxz1
|
|
||||||
|
|
||||||
# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
|
|
||||||
vmovd y2,ruwy2x
|
|
||||||
vmovd w2,t1x
|
|
||||||
vpunpcklqdq t1,ruwy2,ruwy2
|
|
||||||
vmovd u2,t1x
|
|
||||||
vmovd r2,t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,ruwy2,ruwy2
|
|
||||||
vpslld $2,ruwy2,svxz2
|
|
||||||
vpaddd ruwy2,svxz2,svxz2
|
|
||||||
|
|
||||||
# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
|
|
||||||
vmovd y3,ruwy3x
|
|
||||||
vmovd w3,t1x
|
|
||||||
vpunpcklqdq t1,ruwy3,ruwy3
|
|
||||||
vmovd u3,t1x
|
|
||||||
vmovd r3,t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,ruwy3,ruwy3
|
|
||||||
vpslld $2,ruwy3,svxz3
|
|
||||||
vpaddd ruwy3,svxz3,svxz3
|
|
||||||
|
|
||||||
# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
|
|
||||||
vmovd y4,ruwy4x
|
|
||||||
vmovd w4,t1x
|
|
||||||
vpunpcklqdq t1,ruwy4,ruwy4
|
|
||||||
vmovd u4,t1x
|
|
||||||
vmovd r4,t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,ruwy4,ruwy4
|
|
||||||
vpslld $2,ruwy4,svxz4
|
|
||||||
vpaddd ruwy4,svxz4,svxz4
|
|
||||||
|
|
||||||
.Ldoblock4:
|
|
||||||
# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
|
|
||||||
# m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
|
|
||||||
vmovd 0x00(m),hc0x
|
|
||||||
vmovd 0x10(m),t1x
|
|
||||||
vpunpcklqdq t1,hc0,hc0
|
|
||||||
vmovd 0x20(m),t1x
|
|
||||||
vmovd 0x30(m),t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,hc0,hc0
|
|
||||||
vpand ANMASK(%rip),hc0,hc0
|
|
||||||
vmovd h0,t1x
|
|
||||||
vpaddd t1,hc0,hc0
|
|
||||||
# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
|
|
||||||
# (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
|
|
||||||
vmovd 0x03(m),hc1x
|
|
||||||
vmovd 0x13(m),t1x
|
|
||||||
vpunpcklqdq t1,hc1,hc1
|
|
||||||
vmovd 0x23(m),t1x
|
|
||||||
vmovd 0x33(m),t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,hc1,hc1
|
|
||||||
vpsrld $2,hc1,hc1
|
|
||||||
vpand ANMASK(%rip),hc1,hc1
|
|
||||||
vmovd h1,t1x
|
|
||||||
vpaddd t1,hc1,hc1
|
|
||||||
# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
|
|
||||||
# (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
|
|
||||||
vmovd 0x06(m),hc2x
|
|
||||||
vmovd 0x16(m),t1x
|
|
||||||
vpunpcklqdq t1,hc2,hc2
|
|
||||||
vmovd 0x26(m),t1x
|
|
||||||
vmovd 0x36(m),t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,hc2,hc2
|
|
||||||
vpsrld $4,hc2,hc2
|
|
||||||
vpand ANMASK(%rip),hc2,hc2
|
|
||||||
vmovd h2,t1x
|
|
||||||
vpaddd t1,hc2,hc2
|
|
||||||
# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
|
|
||||||
# (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
|
|
||||||
vmovd 0x09(m),hc3x
|
|
||||||
vmovd 0x19(m),t1x
|
|
||||||
vpunpcklqdq t1,hc3,hc3
|
|
||||||
vmovd 0x29(m),t1x
|
|
||||||
vmovd 0x39(m),t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,hc3,hc3
|
|
||||||
vpsrld $6,hc3,hc3
|
|
||||||
vpand ANMASK(%rip),hc3,hc3
|
|
||||||
vmovd h3,t1x
|
|
||||||
vpaddd t1,hc3,hc3
|
|
||||||
# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
|
|
||||||
# (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
|
|
||||||
vmovd 0x0c(m),hc4x
|
|
||||||
vmovd 0x1c(m),t1x
|
|
||||||
vpunpcklqdq t1,hc4,hc4
|
|
||||||
vmovd 0x2c(m),t1x
|
|
||||||
vmovd 0x3c(m),t2x
|
|
||||||
vpunpcklqdq t2,t1,t1
|
|
||||||
vperm2i128 $0x20,t1,hc4,hc4
|
|
||||||
vpsrld $8,hc4,hc4
|
|
||||||
vpor ORMASK(%rip),hc4,hc4
|
|
||||||
vmovd h4,t1x
|
|
||||||
vpaddd t1,hc4,hc4
|
|
||||||
|
|
||||||
# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
|
|
||||||
vpmuludq hc0,ruwy0,t1
|
|
||||||
# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
|
|
||||||
vpmuludq hc1,svxz4,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
|
|
||||||
vpmuludq hc2,svxz3,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
|
|
||||||
vpmuludq hc3,svxz2,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
|
|
||||||
vpmuludq hc4,svxz1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# d0 = t1[0] + t1[1] + t[2] + t[3]
|
|
||||||
vpermq $0xee,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vpsrldq $8,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vmovq t1x,d0
|
|
||||||
|
|
||||||
# t1 = [ hc0[3] * r1, hc0[2] * u1,hc0[1] * w1, hc0[0] * y1 ]
|
|
||||||
vpmuludq hc0,ruwy1,t1
|
|
||||||
# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
|
|
||||||
vpmuludq hc1,ruwy0,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
|
|
||||||
vpmuludq hc2,svxz4,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
|
|
||||||
vpmuludq hc3,svxz3,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
|
|
||||||
vpmuludq hc4,svxz2,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# d1 = t1[0] + t1[1] + t1[3] + t1[4]
|
|
||||||
vpermq $0xee,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vpsrldq $8,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vmovq t1x,d1
|
|
||||||
|
|
||||||
# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
|
|
||||||
vpmuludq hc0,ruwy2,t1
|
|
||||||
# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
|
|
||||||
vpmuludq hc1,ruwy1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
|
|
||||||
vpmuludq hc2,ruwy0,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
|
|
||||||
vpmuludq hc3,svxz4,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
|
|
||||||
vpmuludq hc4,svxz3,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# d2 = t1[0] + t1[1] + t1[2] + t1[3]
|
|
||||||
vpermq $0xee,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vpsrldq $8,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vmovq t1x,d2
|
|
||||||
|
|
||||||
# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
|
|
||||||
vpmuludq hc0,ruwy3,t1
|
|
||||||
# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
|
|
||||||
vpmuludq hc1,ruwy2,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
|
|
||||||
vpmuludq hc2,ruwy1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
|
|
||||||
vpmuludq hc3,ruwy0,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
|
|
||||||
vpmuludq hc4,svxz4,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# d3 = t1[0] + t1[1] + t1[2] + t1[3]
|
|
||||||
vpermq $0xee,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vpsrldq $8,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vmovq t1x,d3
|
|
||||||
|
|
||||||
# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
|
|
||||||
vpmuludq hc0,ruwy4,t1
|
|
||||||
# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
|
|
||||||
vpmuludq hc1,ruwy3,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
|
|
||||||
vpmuludq hc2,ruwy2,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
|
|
||||||
vpmuludq hc3,ruwy1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
|
|
||||||
vpmuludq hc4,ruwy0,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
# d4 = t1[0] + t1[1] + t1[2] + t1[3]
|
|
||||||
vpermq $0xee,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vpsrldq $8,t1,t2
|
|
||||||
vpaddq t2,t1,t1
|
|
||||||
vmovq t1x,d4
|
|
||||||
|
|
||||||
# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
|
|
||||||
# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
|
|
||||||
# amount. Careful: we must not assume the carry bits 'd0 >> 26',
|
|
||||||
# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
|
|
||||||
# integers. It's true in a single-block implementation, but not here.
|
|
||||||
|
|
||||||
# d1 += d0 >> 26
|
|
||||||
mov d0,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d1
|
|
||||||
# h0 = d0 & 0x3ffffff
|
|
||||||
mov d0,%rbx
|
|
||||||
and $0x3ffffff,%ebx
|
|
||||||
|
|
||||||
# d2 += d1 >> 26
|
|
||||||
mov d1,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d2
|
|
||||||
# h1 = d1 & 0x3ffffff
|
|
||||||
mov d1,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h1
|
|
||||||
|
|
||||||
# d3 += d2 >> 26
|
|
||||||
mov d2,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d3
|
|
||||||
# h2 = d2 & 0x3ffffff
|
|
||||||
mov d2,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h2
|
|
||||||
|
|
||||||
# d4 += d3 >> 26
|
|
||||||
mov d3,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d4
|
|
||||||
# h3 = d3 & 0x3ffffff
|
|
||||||
mov d3,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h3
|
|
||||||
|
|
||||||
# h0 += (d4 >> 26) * 5
|
|
||||||
mov d4,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
lea (%rax,%rax,4),%rax
|
|
||||||
add %rax,%rbx
|
|
||||||
# h4 = d4 & 0x3ffffff
|
|
||||||
mov d4,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h4
|
|
||||||
|
|
||||||
# h1 += h0 >> 26
|
|
||||||
mov %rbx,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %eax,h1
|
|
||||||
# h0 = h0 & 0x3ffffff
|
|
||||||
andl $0x3ffffff,%ebx
|
|
||||||
mov %ebx,h0
|
|
||||||
|
|
||||||
add $0x40,m
|
|
||||||
dec %rcx
|
|
||||||
jnz .Ldoblock4
|
|
||||||
|
|
||||||
vzeroupper
|
|
||||||
pop %r13
|
|
||||||
pop %r12
|
|
||||||
pop %rbx
|
|
||||||
ret
|
|
||||||
SYM_FUNC_END(poly1305_4block_avx2)
|
|
@@ -1,590 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
||||||
/*
|
|
||||||
* Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions
|
|
||||||
*
|
|
||||||
* Copyright (C) 2015 Martin Willi
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <linux/linkage.h>
|
|
||||||
|
|
||||||
.section .rodata.cst16.ANMASK, "aM", @progbits, 16
|
|
||||||
.align 16
|
|
||||||
ANMASK: .octa 0x0000000003ffffff0000000003ffffff
|
|
||||||
|
|
||||||
.section .rodata.cst16.ORMASK, "aM", @progbits, 16
|
|
||||||
.align 16
|
|
||||||
ORMASK: .octa 0x00000000010000000000000001000000
|
|
||||||
|
|
||||||
.text
|
|
||||||
|
|
||||||
#define h0 0x00(%rdi)
|
|
||||||
#define h1 0x04(%rdi)
|
|
||||||
#define h2 0x08(%rdi)
|
|
||||||
#define h3 0x0c(%rdi)
|
|
||||||
#define h4 0x10(%rdi)
|
|
||||||
#define r0 0x00(%rdx)
|
|
||||||
#define r1 0x04(%rdx)
|
|
||||||
#define r2 0x08(%rdx)
|
|
||||||
#define r3 0x0c(%rdx)
|
|
||||||
#define r4 0x10(%rdx)
|
|
||||||
#define s1 0x00(%rsp)
|
|
||||||
#define s2 0x04(%rsp)
|
|
||||||
#define s3 0x08(%rsp)
|
|
||||||
#define s4 0x0c(%rsp)
|
|
||||||
#define m %rsi
|
|
||||||
#define h01 %xmm0
|
|
||||||
#define h23 %xmm1
|
|
||||||
#define h44 %xmm2
|
|
||||||
#define t1 %xmm3
|
|
||||||
#define t2 %xmm4
|
|
||||||
#define t3 %xmm5
|
|
||||||
#define t4 %xmm6
|
|
||||||
#define mask %xmm7
|
|
||||||
#define d0 %r8
|
|
||||||
#define d1 %r9
|
|
||||||
#define d2 %r10
|
|
||||||
#define d3 %r11
|
|
||||||
#define d4 %r12
|
|
||||||
|
|
||||||
SYM_FUNC_START(poly1305_block_sse2)
|
|
||||||
# %rdi: Accumulator h[5]
|
|
||||||
# %rsi: 16 byte input block m
|
|
||||||
# %rdx: Poly1305 key r[5]
|
|
||||||
# %rcx: Block count
|
|
||||||
|
|
||||||
# This single block variant tries to improve performance by doing two
|
|
||||||
# multiplications in parallel using SSE instructions. There is quite
|
|
||||||
# some quadword packing involved, hence the speedup is marginal.
|
|
||||||
|
|
||||||
push %rbx
|
|
||||||
push %r12
|
|
||||||
sub $0x10,%rsp
|
|
||||||
|
|
||||||
# s1..s4 = r1..r4 * 5
|
|
||||||
mov r1,%eax
|
|
||||||
lea (%eax,%eax,4),%eax
|
|
||||||
mov %eax,s1
|
|
||||||
mov r2,%eax
|
|
||||||
lea (%eax,%eax,4),%eax
|
|
||||||
mov %eax,s2
|
|
||||||
mov r3,%eax
|
|
||||||
lea (%eax,%eax,4),%eax
|
|
||||||
mov %eax,s3
|
|
||||||
mov r4,%eax
|
|
||||||
lea (%eax,%eax,4),%eax
|
|
||||||
mov %eax,s4
|
|
||||||
|
|
||||||
movdqa ANMASK(%rip),mask
|
|
||||||
|
|
||||||
.Ldoblock:
|
|
||||||
# h01 = [0, h1, 0, h0]
|
|
||||||
# h23 = [0, h3, 0, h2]
|
|
||||||
# h44 = [0, h4, 0, h4]
|
|
||||||
movd h0,h01
|
|
||||||
movd h1,t1
|
|
||||||
movd h2,h23
|
|
||||||
movd h3,t2
|
|
||||||
movd h4,h44
|
|
||||||
punpcklqdq t1,h01
|
|
||||||
punpcklqdq t2,h23
|
|
||||||
punpcklqdq h44,h44
|
|
||||||
|
|
||||||
# h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ]
|
|
||||||
movd 0x00(m),t1
|
|
||||||
movd 0x03(m),t2
|
|
||||||
psrld $2,t2
|
|
||||||
punpcklqdq t2,t1
|
|
||||||
pand mask,t1
|
|
||||||
paddd t1,h01
|
|
||||||
# h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ]
|
|
||||||
movd 0x06(m),t1
|
|
||||||
movd 0x09(m),t2
|
|
||||||
psrld $4,t1
|
|
||||||
psrld $6,t2
|
|
||||||
punpcklqdq t2,t1
|
|
||||||
pand mask,t1
|
|
||||||
paddd t1,h23
|
|
||||||
# h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ]
|
|
||||||
mov 0x0c(m),%eax
|
|
||||||
shr $8,%eax
|
|
||||||
or $0x01000000,%eax
|
|
||||||
movd %eax,t1
|
|
||||||
pshufd $0xc4,t1,t1
|
|
||||||
paddd t1,h44
|
|
||||||
|
|
||||||
# t1[0] = h0 * r0 + h2 * s3
|
|
||||||
# t1[1] = h1 * s4 + h3 * s2
|
|
||||||
movd r0,t1
|
|
||||||
movd s4,t2
|
|
||||||
punpcklqdq t2,t1
|
|
||||||
pmuludq h01,t1
|
|
||||||
movd s3,t2
|
|
||||||
movd s2,t3
|
|
||||||
punpcklqdq t3,t2
|
|
||||||
pmuludq h23,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t2[0] = h0 * r1 + h2 * s4
|
|
||||||
# t2[1] = h1 * r0 + h3 * s3
|
|
||||||
movd r1,t2
|
|
||||||
movd r0,t3
|
|
||||||
punpcklqdq t3,t2
|
|
||||||
pmuludq h01,t2
|
|
||||||
movd s4,t3
|
|
||||||
movd s3,t4
|
|
||||||
punpcklqdq t4,t3
|
|
||||||
pmuludq h23,t3
|
|
||||||
paddq t3,t2
|
|
||||||
# t3[0] = h4 * s1
|
|
||||||
# t3[1] = h4 * s2
|
|
||||||
movd s1,t3
|
|
||||||
movd s2,t4
|
|
||||||
punpcklqdq t4,t3
|
|
||||||
pmuludq h44,t3
|
|
||||||
# d0 = t1[0] + t1[1] + t3[0]
|
|
||||||
# d1 = t2[0] + t2[1] + t3[1]
|
|
||||||
movdqa t1,t4
|
|
||||||
punpcklqdq t2,t4
|
|
||||||
punpckhqdq t2,t1
|
|
||||||
paddq t4,t1
|
|
||||||
paddq t3,t1
|
|
||||||
movq t1,d0
|
|
||||||
psrldq $8,t1
|
|
||||||
movq t1,d1
|
|
||||||
|
|
||||||
# t1[0] = h0 * r2 + h2 * r0
|
|
||||||
# t1[1] = h1 * r1 + h3 * s4
|
|
||||||
movd r2,t1
|
|
||||||
movd r1,t2
|
|
||||||
punpcklqdq t2,t1
|
|
||||||
pmuludq h01,t1
|
|
||||||
movd r0,t2
|
|
||||||
movd s4,t3
|
|
||||||
punpcklqdq t3,t2
|
|
||||||
pmuludq h23,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t2[0] = h0 * r3 + h2 * r1
|
|
||||||
# t2[1] = h1 * r2 + h3 * r0
|
|
||||||
movd r3,t2
|
|
||||||
movd r2,t3
|
|
||||||
punpcklqdq t3,t2
|
|
||||||
pmuludq h01,t2
|
|
||||||
movd r1,t3
|
|
||||||
movd r0,t4
|
|
||||||
punpcklqdq t4,t3
|
|
||||||
pmuludq h23,t3
|
|
||||||
paddq t3,t2
|
|
||||||
# t3[0] = h4 * s3
|
|
||||||
# t3[1] = h4 * s4
|
|
||||||
movd s3,t3
|
|
||||||
movd s4,t4
|
|
||||||
punpcklqdq t4,t3
|
|
||||||
pmuludq h44,t3
|
|
||||||
# d2 = t1[0] + t1[1] + t3[0]
|
|
||||||
# d3 = t2[0] + t2[1] + t3[1]
|
|
||||||
movdqa t1,t4
|
|
||||||
punpcklqdq t2,t4
|
|
||||||
punpckhqdq t2,t1
|
|
||||||
paddq t4,t1
|
|
||||||
paddq t3,t1
|
|
||||||
movq t1,d2
|
|
||||||
psrldq $8,t1
|
|
||||||
movq t1,d3
|
|
||||||
|
|
||||||
# t1[0] = h0 * r4 + h2 * r2
|
|
||||||
# t1[1] = h1 * r3 + h3 * r1
|
|
||||||
movd r4,t1
|
|
||||||
movd r3,t2
|
|
||||||
punpcklqdq t2,t1
|
|
||||||
pmuludq h01,t1
|
|
||||||
movd r2,t2
|
|
||||||
movd r1,t3
|
|
||||||
punpcklqdq t3,t2
|
|
||||||
pmuludq h23,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t3[0] = h4 * r0
|
|
||||||
movd r0,t3
|
|
||||||
pmuludq h44,t3
|
|
||||||
# d4 = t1[0] + t1[1] + t3[0]
|
|
||||||
movdqa t1,t4
|
|
||||||
psrldq $8,t4
|
|
||||||
paddq t4,t1
|
|
||||||
paddq t3,t1
|
|
||||||
movq t1,d4
|
|
||||||
|
|
||||||
# d1 += d0 >> 26
|
|
||||||
mov d0,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d1
|
|
||||||
# h0 = d0 & 0x3ffffff
|
|
||||||
mov d0,%rbx
|
|
||||||
and $0x3ffffff,%ebx
|
|
||||||
|
|
||||||
# d2 += d1 >> 26
|
|
||||||
mov d1,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d2
|
|
||||||
# h1 = d1 & 0x3ffffff
|
|
||||||
mov d1,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h1
|
|
||||||
|
|
||||||
# d3 += d2 >> 26
|
|
||||||
mov d2,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d3
|
|
||||||
# h2 = d2 & 0x3ffffff
|
|
||||||
mov d2,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h2
|
|
||||||
|
|
||||||
# d4 += d3 >> 26
|
|
||||||
mov d3,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d4
|
|
||||||
# h3 = d3 & 0x3ffffff
|
|
||||||
mov d3,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h3
|
|
||||||
|
|
||||||
# h0 += (d4 >> 26) * 5
|
|
||||||
mov d4,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
lea (%rax,%rax,4),%rax
|
|
||||||
add %rax,%rbx
|
|
||||||
# h4 = d4 & 0x3ffffff
|
|
||||||
mov d4,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h4
|
|
||||||
|
|
||||||
# h1 += h0 >> 26
|
|
||||||
mov %rbx,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %eax,h1
|
|
||||||
# h0 = h0 & 0x3ffffff
|
|
||||||
andl $0x3ffffff,%ebx
|
|
||||||
mov %ebx,h0
|
|
||||||
|
|
||||||
add $0x10,m
|
|
||||||
dec %rcx
|
|
||||||
jnz .Ldoblock
|
|
||||||
|
|
||||||
# Zeroing of key material
|
|
||||||
mov %rcx,0x00(%rsp)
|
|
||||||
mov %rcx,0x08(%rsp)
|
|
||||||
|
|
||||||
add $0x10,%rsp
|
|
||||||
pop %r12
|
|
||||||
pop %rbx
|
|
||||||
ret
|
|
||||||
SYM_FUNC_END(poly1305_block_sse2)
|
|
||||||
|
|
||||||
|
|
||||||
#define u0 0x00(%r8)
|
|
||||||
#define u1 0x04(%r8)
|
|
||||||
#define u2 0x08(%r8)
|
|
||||||
#define u3 0x0c(%r8)
|
|
||||||
#define u4 0x10(%r8)
|
|
||||||
#define hc0 %xmm0
|
|
||||||
#define hc1 %xmm1
|
|
||||||
#define hc2 %xmm2
|
|
||||||
#define hc3 %xmm5
|
|
||||||
#define hc4 %xmm6
|
|
||||||
#define ru0 %xmm7
|
|
||||||
#define ru1 %xmm8
|
|
||||||
#define ru2 %xmm9
|
|
||||||
#define ru3 %xmm10
|
|
||||||
#define ru4 %xmm11
|
|
||||||
#define sv1 %xmm12
|
|
||||||
#define sv2 %xmm13
|
|
||||||
#define sv3 %xmm14
|
|
||||||
#define sv4 %xmm15
|
|
||||||
#undef d0
|
|
||||||
#define d0 %r13
|
|
||||||
|
|
||||||
SYM_FUNC_START(poly1305_2block_sse2)
|
|
||||||
# %rdi: Accumulator h[5]
|
|
||||||
# %rsi: 16 byte input block m
|
|
||||||
# %rdx: Poly1305 key r[5]
|
|
||||||
# %rcx: Doubleblock count
|
|
||||||
# %r8: Poly1305 derived key r^2 u[5]
|
|
||||||
|
|
||||||
# This two-block variant further improves performance by using loop
|
|
||||||
# unrolled block processing. This is more straightforward and does
|
|
||||||
# less byte shuffling, but requires a second Poly1305 key r^2:
|
|
||||||
# h = (h + m) * r => h = (h + m1) * r^2 + m2 * r
|
|
||||||
|
|
||||||
push %rbx
|
|
||||||
push %r12
|
|
||||||
push %r13
|
|
||||||
|
|
||||||
# combine r0,u0
|
|
||||||
movd u0,ru0
|
|
||||||
movd r0,t1
|
|
||||||
punpcklqdq t1,ru0
|
|
||||||
|
|
||||||
# combine r1,u1 and s1=r1*5,v1=u1*5
|
|
||||||
movd u1,ru1
|
|
||||||
movd r1,t1
|
|
||||||
punpcklqdq t1,ru1
|
|
||||||
movdqa ru1,sv1
|
|
||||||
pslld $2,sv1
|
|
||||||
paddd ru1,sv1
|
|
||||||
|
|
||||||
# combine r2,u2 and s2=r2*5,v2=u2*5
|
|
||||||
movd u2,ru2
|
|
||||||
movd r2,t1
|
|
||||||
punpcklqdq t1,ru2
|
|
||||||
movdqa ru2,sv2
|
|
||||||
pslld $2,sv2
|
|
||||||
paddd ru2,sv2
|
|
||||||
|
|
||||||
# combine r3,u3 and s3=r3*5,v3=u3*5
|
|
||||||
movd u3,ru3
|
|
||||||
movd r3,t1
|
|
||||||
punpcklqdq t1,ru3
|
|
||||||
movdqa ru3,sv3
|
|
||||||
pslld $2,sv3
|
|
||||||
paddd ru3,sv3
|
|
||||||
|
|
||||||
# combine r4,u4 and s4=r4*5,v4=u4*5
|
|
||||||
movd u4,ru4
|
|
||||||
movd r4,t1
|
|
||||||
punpcklqdq t1,ru4
|
|
||||||
movdqa ru4,sv4
|
|
||||||
pslld $2,sv4
|
|
||||||
paddd ru4,sv4
|
|
||||||
|
|
||||||
.Ldoblock2:
|
|
||||||
# hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ]
|
|
||||||
movd 0x00(m),hc0
|
|
||||||
movd 0x10(m),t1
|
|
||||||
punpcklqdq t1,hc0
|
|
||||||
pand ANMASK(%rip),hc0
|
|
||||||
movd h0,t1
|
|
||||||
paddd t1,hc0
|
|
||||||
# hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ]
|
|
||||||
movd 0x03(m),hc1
|
|
||||||
movd 0x13(m),t1
|
|
||||||
punpcklqdq t1,hc1
|
|
||||||
psrld $2,hc1
|
|
||||||
pand ANMASK(%rip),hc1
|
|
||||||
movd h1,t1
|
|
||||||
paddd t1,hc1
|
|
||||||
# hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ]
|
|
||||||
movd 0x06(m),hc2
|
|
||||||
movd 0x16(m),t1
|
|
||||||
punpcklqdq t1,hc2
|
|
||||||
psrld $4,hc2
|
|
||||||
pand ANMASK(%rip),hc2
|
|
||||||
movd h2,t1
|
|
||||||
paddd t1,hc2
|
|
||||||
# hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ]
|
|
||||||
movd 0x09(m),hc3
|
|
||||||
movd 0x19(m),t1
|
|
||||||
punpcklqdq t1,hc3
|
|
||||||
psrld $6,hc3
|
|
||||||
pand ANMASK(%rip),hc3
|
|
||||||
movd h3,t1
|
|
||||||
paddd t1,hc3
|
|
||||||
# hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ]
|
|
||||||
movd 0x0c(m),hc4
|
|
||||||
movd 0x1c(m),t1
|
|
||||||
punpcklqdq t1,hc4
|
|
||||||
psrld $8,hc4
|
|
||||||
por ORMASK(%rip),hc4
|
|
||||||
movd h4,t1
|
|
||||||
paddd t1,hc4
|
|
||||||
|
|
||||||
# t1 = [ hc0[1] * r0, hc0[0] * u0 ]
|
|
||||||
movdqa ru0,t1
|
|
||||||
pmuludq hc0,t1
|
|
||||||
# t1 += [ hc1[1] * s4, hc1[0] * v4 ]
|
|
||||||
movdqa sv4,t2
|
|
||||||
pmuludq hc1,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc2[1] * s3, hc2[0] * v3 ]
|
|
||||||
movdqa sv3,t2
|
|
||||||
pmuludq hc2,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc3[1] * s2, hc3[0] * v2 ]
|
|
||||||
movdqa sv2,t2
|
|
||||||
pmuludq hc3,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc4[1] * s1, hc4[0] * v1 ]
|
|
||||||
movdqa sv1,t2
|
|
||||||
pmuludq hc4,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# d0 = t1[0] + t1[1]
|
|
||||||
movdqa t1,t2
|
|
||||||
psrldq $8,t2
|
|
||||||
paddq t2,t1
|
|
||||||
movq t1,d0
|
|
||||||
|
|
||||||
# t1 = [ hc0[1] * r1, hc0[0] * u1 ]
|
|
||||||
movdqa ru1,t1
|
|
||||||
pmuludq hc0,t1
|
|
||||||
# t1 += [ hc1[1] * r0, hc1[0] * u0 ]
|
|
||||||
movdqa ru0,t2
|
|
||||||
pmuludq hc1,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc2[1] * s4, hc2[0] * v4 ]
|
|
||||||
movdqa sv4,t2
|
|
||||||
pmuludq hc2,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc3[1] * s3, hc3[0] * v3 ]
|
|
||||||
movdqa sv3,t2
|
|
||||||
pmuludq hc3,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc4[1] * s2, hc4[0] * v2 ]
|
|
||||||
movdqa sv2,t2
|
|
||||||
pmuludq hc4,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# d1 = t1[0] + t1[1]
|
|
||||||
movdqa t1,t2
|
|
||||||
psrldq $8,t2
|
|
||||||
paddq t2,t1
|
|
||||||
movq t1,d1
|
|
||||||
|
|
||||||
# t1 = [ hc0[1] * r2, hc0[0] * u2 ]
|
|
||||||
movdqa ru2,t1
|
|
||||||
pmuludq hc0,t1
|
|
||||||
# t1 += [ hc1[1] * r1, hc1[0] * u1 ]
|
|
||||||
movdqa ru1,t2
|
|
||||||
pmuludq hc1,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc2[1] * r0, hc2[0] * u0 ]
|
|
||||||
movdqa ru0,t2
|
|
||||||
pmuludq hc2,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc3[1] * s4, hc3[0] * v4 ]
|
|
||||||
movdqa sv4,t2
|
|
||||||
pmuludq hc3,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc4[1] * s3, hc4[0] * v3 ]
|
|
||||||
movdqa sv3,t2
|
|
||||||
pmuludq hc4,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# d2 = t1[0] + t1[1]
|
|
||||||
movdqa t1,t2
|
|
||||||
psrldq $8,t2
|
|
||||||
paddq t2,t1
|
|
||||||
movq t1,d2
|
|
||||||
|
|
||||||
# t1 = [ hc0[1] * r3, hc0[0] * u3 ]
|
|
||||||
movdqa ru3,t1
|
|
||||||
pmuludq hc0,t1
|
|
||||||
# t1 += [ hc1[1] * r2, hc1[0] * u2 ]
|
|
||||||
movdqa ru2,t2
|
|
||||||
pmuludq hc1,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc2[1] * r1, hc2[0] * u1 ]
|
|
||||||
movdqa ru1,t2
|
|
||||||
pmuludq hc2,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc3[1] * r0, hc3[0] * u0 ]
|
|
||||||
movdqa ru0,t2
|
|
||||||
pmuludq hc3,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc4[1] * s4, hc4[0] * v4 ]
|
|
||||||
movdqa sv4,t2
|
|
||||||
pmuludq hc4,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# d3 = t1[0] + t1[1]
|
|
||||||
movdqa t1,t2
|
|
||||||
psrldq $8,t2
|
|
||||||
paddq t2,t1
|
|
||||||
movq t1,d3
|
|
||||||
|
|
||||||
# t1 = [ hc0[1] * r4, hc0[0] * u4 ]
|
|
||||||
movdqa ru4,t1
|
|
||||||
pmuludq hc0,t1
|
|
||||||
# t1 += [ hc1[1] * r3, hc1[0] * u3 ]
|
|
||||||
movdqa ru3,t2
|
|
||||||
pmuludq hc1,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc2[1] * r2, hc2[0] * u2 ]
|
|
||||||
movdqa ru2,t2
|
|
||||||
pmuludq hc2,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc3[1] * r1, hc3[0] * u1 ]
|
|
||||||
movdqa ru1,t2
|
|
||||||
pmuludq hc3,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# t1 += [ hc4[1] * r0, hc4[0] * u0 ]
|
|
||||||
movdqa ru0,t2
|
|
||||||
pmuludq hc4,t2
|
|
||||||
paddq t2,t1
|
|
||||||
# d4 = t1[0] + t1[1]
|
|
||||||
movdqa t1,t2
|
|
||||||
psrldq $8,t2
|
|
||||||
paddq t2,t1
|
|
||||||
movq t1,d4
|
|
||||||
|
|
||||||
# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
|
|
||||||
# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
|
|
||||||
# amount. Careful: we must not assume the carry bits 'd0 >> 26',
|
|
||||||
# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
|
|
||||||
# integers. It's true in a single-block implementation, but not here.
|
|
||||||
|
|
||||||
# d1 += d0 >> 26
|
|
||||||
mov d0,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d1
|
|
||||||
# h0 = d0 & 0x3ffffff
|
|
||||||
mov d0,%rbx
|
|
||||||
and $0x3ffffff,%ebx
|
|
||||||
|
|
||||||
# d2 += d1 >> 26
|
|
||||||
mov d1,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d2
|
|
||||||
# h1 = d1 & 0x3ffffff
|
|
||||||
mov d1,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h1
|
|
||||||
|
|
||||||
# d3 += d2 >> 26
|
|
||||||
mov d2,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d3
|
|
||||||
# h2 = d2 & 0x3ffffff
|
|
||||||
mov d2,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h2
|
|
||||||
|
|
||||||
# d4 += d3 >> 26
|
|
||||||
mov d3,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %rax,d4
|
|
||||||
# h3 = d3 & 0x3ffffff
|
|
||||||
mov d3,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h3
|
|
||||||
|
|
||||||
# h0 += (d4 >> 26) * 5
|
|
||||||
mov d4,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
lea (%rax,%rax,4),%rax
|
|
||||||
add %rax,%rbx
|
|
||||||
# h4 = d4 & 0x3ffffff
|
|
||||||
mov d4,%rax
|
|
||||||
and $0x3ffffff,%eax
|
|
||||||
mov %eax,h4
|
|
||||||
|
|
||||||
# h1 += h0 >> 26
|
|
||||||
mov %rbx,%rax
|
|
||||||
shr $26,%rax
|
|
||||||
add %eax,h1
|
|
||||||
# h0 = h0 & 0x3ffffff
|
|
||||||
andl $0x3ffffff,%ebx
|
|
||||||
mov %ebx,h0
|
|
||||||
|
|
||||||
add $0x20,m
|
|
||||||
dec %rcx
|
|
||||||
jnz .Ldoblock2
|
|
||||||
|
|
||||||
pop %r13
|
|
||||||
pop %r12
|
|
||||||
pop %rbx
|
|
||||||
ret
|
|
||||||
SYM_FUNC_END(poly1305_2block_sse2)
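For reference, the identity quoted in the comment at the top of poly1305_2block_sse2 ("h = (h + m) * r => h = (h + m1) * r^2 + m2 * r") is just two consecutive single-block steps expanded, with all arithmetic mod 2^130 - 5:

    h'  = (h + m1) * r
    h'' = (h' + m2) * r = ((h + m1) * r + m2) * r = (h + m1) * r^2 + m2 * r

so each loop iteration multiplies the first block by the derived key u = r^2 and the second block by r independently, which is exactly why the function takes both r and u.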
|
|
arch/x86/crypto/poly1305-x86_64-cryptogams.pl | 4265 (new file)
File diff suppressed because it is too large
@ -1,8 +1,6 @@
|
|||||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||||
/*
|
/*
|
||||||
* Poly1305 authenticator algorithm, RFC7539, SIMD glue code
|
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
|
||||||
*
|
|
||||||
* Copyright (C) 2015 Martin Willi
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <crypto/algapi.h>
|
#include <crypto/algapi.h>
|
||||||
@ -13,108 +11,166 @@
|
|||||||
#include <linux/jump_label.h>
|
#include <linux/jump_label.h>
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
|
#include <asm/intel-family.h>
|
||||||
#include <asm/simd.h>
|
#include <asm/simd.h>
|
||||||
|
|
||||||
asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src,
|
asmlinkage void poly1305_init_x86_64(void *ctx,
|
||||||
const u32 *r, unsigned int blocks);
|
const u8 key[POLY1305_KEY_SIZE]);
|
||||||
asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r,
|
asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
|
||||||
unsigned int blocks, const u32 *u);
|
const size_t len, const u32 padbit);
|
||||||
asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r,
|
asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
|
||||||
unsigned int blocks, const u32 *u);
|
const u32 nonce[4]);
|
||||||
|
asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
|
||||||
|
const u32 nonce[4]);
|
||||||
|
asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, const size_t len,
|
||||||
|
const u32 padbit);
|
||||||
|
asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, const size_t len,
|
||||||
|
const u32 padbit);
|
||||||
|
asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp,
|
||||||
|
const size_t len, const u32 padbit);
|
||||||
|
|
||||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_simd);
|
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx);
|
||||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2);
|
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2);
|
||||||
|
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx512);
|
||||||
|
|
||||||
static void poly1305_simd_mult(u32 *a, const u32 *b)
|
struct poly1305_arch_internal {
|
||||||
|
union {
|
||||||
|
struct {
|
||||||
|
u32 h[5];
|
||||||
|
u32 is_base2_26;
|
||||||
|
};
|
||||||
|
u64 hs[3];
|
||||||
|
};
|
||||||
|
u64 r[2];
|
||||||
|
u64 pad;
|
||||||
|
struct { u32 r2, r1, r4, r3; } rn[9];
|
||||||
|
};
|
||||||
|
|
||||||
|
/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit
|
||||||
|
* the unfortunate situation of using AVX and then having to go back to scalar
|
||||||
|
* -- because the user is silly and has called the update function from two
|
||||||
|
* separate contexts -- then we need to convert back to the original base before
|
||||||
|
* proceeding. It is possible to reason that the initial reduction below is
|
||||||
|
* sufficient given the implementation invariants. However, for an avoidance of
|
||||||
|
* doubt and because this is not performance critical, we do the full reduction
|
||||||
|
* anyway. Z3 proof of below function: https://xn--4db.cc/ltPtHCKN/py
|
||||||
|
*/
|
||||||
|
static void convert_to_base2_64(void *ctx)
|
||||||
{
|
{
|
||||||
u8 m[POLY1305_BLOCK_SIZE];
|
struct poly1305_arch_internal *state = ctx;
|
||||||
|
u32 cy;
|
||||||
|
|
||||||
memset(m, 0, sizeof(m));
|
if (!state->is_base2_26)
|
||||||
/* The poly1305 block function adds a hi-bit to the accumulator which
|
return;
|
||||||
* we don't need for key multiplication; compensate for it. */
|
|
||||||
a[4] -= 1 << 24;
|
cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy;
|
||||||
poly1305_block_sse2(a, m, b, 1);
|
cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy;
|
||||||
|
cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy;
|
||||||
|
cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy;
|
||||||
|
state->hs[0] = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0];
|
||||||
|
state->hs[1] = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12);
|
||||||
|
state->hs[2] = state->h[4] >> 24;
|
||||||
|
#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
|
||||||
|
cy = (state->hs[2] >> 2) + (state->hs[2] & ~3ULL);
|
||||||
|
state->hs[2] &= 3;
|
||||||
|
state->hs[0] += cy;
|
||||||
|
state->hs[1] += (cy = ULT(state->hs[0], cy));
|
||||||
|
state->hs[2] += ULT(state->hs[1], cy);
|
||||||
|
#undef ULT
|
||||||
|
state->is_base2_26 = 0;
|
||||||
}
|
}
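The ULT() macro used above is a branch-free unsigned "a < b" test. A small standalone check of the same expression (hypothetical helper name, ordinary userspace C rather than kernel code):

#include <assert.h>
#include <stdint.h>

/* Isolates the would-be borrow of a - b in the top bit, so the result is 1
 * when a < b and 0 otherwise, without a data-dependent branch. */
static uint64_t ult(uint64_t a, uint64_t b)
{
	return (a ^ ((a ^ b) | ((a - b) ^ b))) >> 63;
}

int main(void)
{
	assert(ult(1, 2) == 1);
	assert(ult(2, 1) == 0);
	assert(ult(~0ULL, 0) == 0);
	assert(ult(0, ~0ULL) == 1);
	return 0;
}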
|
||||||
|
|
||||||
static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx,
|
static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
|
||||||
const u8 *src, unsigned int srclen)
|
|
||||||
{
|
{
|
||||||
unsigned int datalen;
|
poly1305_init_x86_64(ctx, key);
|
||||||
|
|
||||||
if (unlikely(!dctx->sset)) {
|
|
||||||
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
|
|
||||||
src += srclen - datalen;
|
|
||||||
srclen = datalen;
|
|
||||||
}
|
|
||||||
if (srclen >= POLY1305_BLOCK_SIZE) {
|
|
||||||
poly1305_core_blocks(&dctx->h, dctx->r, src,
|
|
||||||
srclen / POLY1305_BLOCK_SIZE, 1);
|
|
||||||
srclen %= POLY1305_BLOCK_SIZE;
|
|
||||||
}
|
|
||||||
return srclen;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
|
static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
|
||||||
const u8 *src, unsigned int srclen)
|
const u32 padbit)
|
||||||
{
|
{
|
||||||
unsigned int blocks, datalen;
|
struct poly1305_arch_internal *state = ctx;
|
||||||
|
|
||||||
if (unlikely(!dctx->sset)) {
|
/* SIMD disables preemption, so relax after processing each page. */
|
||||||
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
|
BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
|
||||||
src += srclen - datalen;
|
PAGE_SIZE % POLY1305_BLOCK_SIZE);
|
||||||
srclen = datalen;
|
|
||||||
|
if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) ||
|
||||||
|
(len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
|
||||||
|
!crypto_simd_usable()) {
|
||||||
|
convert_to_base2_64(ctx);
|
||||||
|
poly1305_blocks_x86_64(ctx, inp, len, padbit);
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (IS_ENABLED(CONFIG_AS_AVX2) &&
|
for (;;) {
|
||||||
static_branch_likely(&poly1305_use_avx2) &&
|
const size_t bytes = min_t(size_t, len, PAGE_SIZE);
|
||||||
srclen >= POLY1305_BLOCK_SIZE * 4) {
|
|
||||||
if (unlikely(dctx->rset < 4)) {
|
kernel_fpu_begin();
|
||||||
if (dctx->rset < 2) {
|
if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
|
||||||
dctx->r[1] = dctx->r[0];
|
poly1305_blocks_avx512(ctx, inp, bytes, padbit);
|
||||||
poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r);
|
else if (IS_ENABLED(CONFIG_AS_AVX2) && static_branch_likely(&poly1305_use_avx2))
|
||||||
|
poly1305_blocks_avx2(ctx, inp, bytes, padbit);
|
||||||
|
else
|
||||||
|
poly1305_blocks_avx(ctx, inp, bytes, padbit);
|
||||||
|
kernel_fpu_end();
|
||||||
|
len -= bytes;
|
||||||
|
if (!len)
|
||||||
|
break;
|
||||||
|
inp += bytes;
|
||||||
}
|
}
|
||||||
dctx->r[2] = dctx->r[1];
|
|
||||||
poly1305_simd_mult(dctx->r[2].r, dctx->r[0].r);
|
|
||||||
dctx->r[3] = dctx->r[2];
|
|
||||||
poly1305_simd_mult(dctx->r[3].r, dctx->r[0].r);
|
|
||||||
dctx->rset = 4;
|
|
||||||
}
|
|
||||||
blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
|
|
||||||
poly1305_4block_avx2(dctx->h.h, src, dctx->r[0].r, blocks,
|
|
||||||
dctx->r[1].r);
|
|
||||||
src += POLY1305_BLOCK_SIZE * 4 * blocks;
|
|
||||||
srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
|
static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
|
||||||
if (unlikely(dctx->rset < 2)) {
|
const u32 nonce[4])
|
||||||
dctx->r[1] = dctx->r[0];
|
|
||||||
poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r);
|
|
||||||
dctx->rset = 2;
|
|
||||||
}
|
|
||||||
blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
|
|
||||||
poly1305_2block_sse2(dctx->h.h, src, dctx->r[0].r,
|
|
||||||
blocks, dctx->r[1].r);
|
|
||||||
src += POLY1305_BLOCK_SIZE * 2 * blocks;
|
|
||||||
srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
|
|
||||||
}
|
|
||||||
if (srclen >= POLY1305_BLOCK_SIZE) {
|
|
||||||
poly1305_block_sse2(dctx->h.h, src, dctx->r[0].r, 1);
|
|
||||||
srclen -= POLY1305_BLOCK_SIZE;
|
|
||||||
}
|
|
||||||
return srclen;
|
|
||||||
}
|
|
||||||
|
|
||||||
void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key)
|
|
||||||
{
|
{
|
||||||
poly1305_init_generic(desc, key);
|
if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx))
|
||||||
|
poly1305_emit_x86_64(ctx, mac, nonce);
|
||||||
|
else
|
||||||
|
poly1305_emit_avx(ctx, mac, nonce);
|
||||||
|
}
|
||||||
|
|
||||||
|
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
|
||||||
|
{
|
||||||
|
poly1305_simd_init(&dctx->h, key);
|
||||||
|
dctx->s[0] = get_unaligned_le32(&key[16]);
|
||||||
|
dctx->s[1] = get_unaligned_le32(&key[20]);
|
||||||
|
dctx->s[2] = get_unaligned_le32(&key[24]);
|
||||||
|
dctx->s[3] = get_unaligned_le32(&key[28]);
|
||||||
|
dctx->buflen = 0;
|
||||||
|
dctx->sset = true;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(poly1305_init_arch);
|
EXPORT_SYMBOL(poly1305_init_arch);
|
||||||
|
|
||||||
|
static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
|
||||||
|
const u8 *inp, unsigned int len)
|
||||||
|
{
|
||||||
|
unsigned int acc = 0;
|
||||||
|
if (unlikely(!dctx->sset)) {
|
||||||
|
if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
|
||||||
|
poly1305_simd_init(&dctx->h, inp);
|
||||||
|
inp += POLY1305_BLOCK_SIZE;
|
||||||
|
len -= POLY1305_BLOCK_SIZE;
|
||||||
|
acc += POLY1305_BLOCK_SIZE;
|
||||||
|
dctx->rset = 1;
|
||||||
|
}
|
||||||
|
if (len >= POLY1305_BLOCK_SIZE) {
|
||||||
|
dctx->s[0] = get_unaligned_le32(&inp[0]);
|
||||||
|
dctx->s[1] = get_unaligned_le32(&inp[4]);
|
||||||
|
dctx->s[2] = get_unaligned_le32(&inp[8]);
|
||||||
|
dctx->s[3] = get_unaligned_le32(&inp[12]);
|
||||||
|
inp += POLY1305_BLOCK_SIZE;
|
||||||
|
len -= POLY1305_BLOCK_SIZE;
|
||||||
|
acc += POLY1305_BLOCK_SIZE;
|
||||||
|
dctx->sset = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return acc;
|
||||||
|
}
|
||||||
|
|
||||||
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
|
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||||
unsigned int srclen)
|
unsigned int srclen)
|
||||||
{
|
{
|
||||||
unsigned int bytes;
|
unsigned int bytes, used;
|
||||||
|
|
||||||
if (unlikely(dctx->buflen)) {
|
if (unlikely(dctx->buflen)) {
|
||||||
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
|
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||||
@ -124,31 +180,19 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
|
|||||||
dctx->buflen += bytes;
|
dctx->buflen += bytes;
|
||||||
|
|
||||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||||
if (static_branch_likely(&poly1305_use_simd) &&
|
if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, POLY1305_BLOCK_SIZE)))
|
||||||
likely(crypto_simd_usable())) {
|
poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1);
|
||||||
kernel_fpu_begin();
|
|
||||||
poly1305_simd_blocks(dctx, dctx->buf,
|
|
||||||
POLY1305_BLOCK_SIZE);
|
|
||||||
kernel_fpu_end();
|
|
||||||
} else {
|
|
||||||
poly1305_scalar_blocks(dctx, dctx->buf,
|
|
||||||
POLY1305_BLOCK_SIZE);
|
|
||||||
}
|
|
||||||
dctx->buflen = 0;
|
dctx->buflen = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
|
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
|
||||||
if (static_branch_likely(&poly1305_use_simd) &&
|
bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
|
||||||
likely(crypto_simd_usable())) {
|
srclen -= bytes;
|
||||||
kernel_fpu_begin();
|
used = crypto_poly1305_setdctxkey(dctx, src, bytes);
|
||||||
bytes = poly1305_simd_blocks(dctx, src, srclen);
|
if (likely(bytes - used))
|
||||||
kernel_fpu_end();
|
poly1305_simd_blocks(&dctx->h, src + used, bytes - used, 1);
|
||||||
} else {
|
src += bytes;
|
||||||
bytes = poly1305_scalar_blocks(dctx, src, srclen);
|
|
||||||
}
|
|
||||||
src += srclen - bytes;
|
|
||||||
srclen = bytes;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(srclen)) {
|
if (unlikely(srclen)) {
|
||||||
@ -158,9 +202,17 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(poly1305_update_arch);
|
EXPORT_SYMBOL(poly1305_update_arch);
|
||||||
|
|
||||||
void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest)
|
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
|
||||||
{
|
{
|
||||||
poly1305_final_generic(desc, digest);
|
if (unlikely(dctx->buflen)) {
|
||||||
|
dctx->buf[dctx->buflen++] = 1;
|
||||||
|
memset(dctx->buf + dctx->buflen, 0,
|
||||||
|
POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||||
|
poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
poly1305_simd_emit(&dctx->h, dst, dctx->s);
|
||||||
|
*dctx = (struct poly1305_desc_ctx){};
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(poly1305_final_arch);
|
EXPORT_SYMBOL(poly1305_final_arch);
|
||||||
|
|
||||||
@ -168,11 +220,16 @@ static int crypto_poly1305_init(struct shash_desc *desc)
|
|||||||
{
|
{
|
||||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
poly1305_core_init(&dctx->h);
|
*dctx = (struct poly1305_desc_ctx){};
|
||||||
dctx->buflen = 0;
|
return 0;
|
||||||
dctx->rset = 0;
|
}
|
||||||
dctx->sset = false;
|
|
||||||
|
|
||||||
|
static int crypto_poly1305_update(struct shash_desc *desc,
|
||||||
|
const u8 *src, unsigned int srclen)
|
||||||
|
{
|
||||||
|
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
|
poly1305_update_arch(dctx, src, srclen);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -183,23 +240,14 @@ static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
|
|||||||
if (unlikely(!dctx->sset))
|
if (unlikely(!dctx->sset))
|
||||||
return -ENOKEY;
|
return -ENOKEY;
|
||||||
|
|
||||||
poly1305_final_generic(dctx, dst);
|
poly1305_final_arch(dctx, dst);
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int poly1305_simd_update(struct shash_desc *desc,
|
|
||||||
const u8 *src, unsigned int srclen)
|
|
||||||
{
|
|
||||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
|
||||||
|
|
||||||
poly1305_update_arch(dctx, src, srclen);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct shash_alg alg = {
|
static struct shash_alg alg = {
|
||||||
.digestsize = POLY1305_DIGEST_SIZE,
|
.digestsize = POLY1305_DIGEST_SIZE,
|
||||||
.init = crypto_poly1305_init,
|
.init = crypto_poly1305_init,
|
||||||
.update = poly1305_simd_update,
|
.update = crypto_poly1305_update,
|
||||||
.final = crypto_poly1305_final,
|
.final = crypto_poly1305_final,
|
||||||
.descsize = sizeof(struct poly1305_desc_ctx),
|
.descsize = sizeof(struct poly1305_desc_ctx),
|
||||||
.base = {
|
.base = {
|
||||||
@ -213,17 +261,19 @@ static struct shash_alg alg = {
|
|||||||
|
|
||||||
static int __init poly1305_simd_mod_init(void)
|
static int __init poly1305_simd_mod_init(void)
|
||||||
{
|
{
|
||||||
if (!boot_cpu_has(X86_FEATURE_XMM2))
|
if (IS_ENABLED(CONFIG_AS_AVX) && boot_cpu_has(X86_FEATURE_AVX) &&
|
||||||
return 0;
|
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
|
||||||
|
static_branch_enable(&poly1305_use_avx);
|
||||||
static_branch_enable(&poly1305_use_simd);
|
if (IS_ENABLED(CONFIG_AS_AVX2) && boot_cpu_has(X86_FEATURE_AVX) &&
|
||||||
|
|
||||||
if (IS_ENABLED(CONFIG_AS_AVX2) &&
|
|
||||||
boot_cpu_has(X86_FEATURE_AVX) &&
|
|
||||||
boot_cpu_has(X86_FEATURE_AVX2) &&
|
boot_cpu_has(X86_FEATURE_AVX2) &&
|
||||||
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
|
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
|
||||||
static_branch_enable(&poly1305_use_avx2);
|
static_branch_enable(&poly1305_use_avx2);
|
||||||
|
if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) &&
|
||||||
|
boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
|
||||||
|
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
|
||||||
|
/* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */
|
||||||
|
boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X)
|
||||||
|
static_branch_enable(&poly1305_use_avx512);
|
||||||
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0;
|
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -237,7 +287,7 @@ module_init(poly1305_simd_mod_init);
|
|||||||
module_exit(poly1305_simd_mod_exit);
|
module_exit(poly1305_simd_mod_exit);
|
||||||
|
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
|
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
|
||||||
MODULE_DESCRIPTION("Poly1305 authenticator");
|
MODULE_DESCRIPTION("Poly1305 authenticator");
|
||||||
MODULE_ALIAS_CRYPTO("poly1305");
|
MODULE_ALIAS_CRYPTO("poly1305");
|
||||||
MODULE_ALIAS_CRYPTO("poly1305-simd");
|
MODULE_ALIAS_CRYPTO("poly1305-simd");
|
||||||
|
@ -19,18 +19,16 @@
|
|||||||
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
|
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
|
||||||
|
|
||||||
/* 16-way AVX2 parallel cipher functions */
|
/* 16-way AVX2 parallel cipher functions */
|
||||||
asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
const u8 *src);
|
asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
const u8 *src);
|
|
||||||
asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src);
|
|
||||||
|
|
||||||
asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src,
|
asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
|
le128 *iv);
|
||||||
|
asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
|
le128 *iv);
|
||||||
|
asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
le128 *iv);
|
le128 *iv);
|
||||||
asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
|
|
||||||
const u8 *src, le128 *iv);
|
|
||||||
asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
|
|
||||||
const u8 *src, le128 *iv);
|
|
||||||
|
|
||||||
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
|
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||||
const u8 *key, unsigned int keylen)
|
const u8 *key, unsigned int keylen)
|
||||||
@ -44,13 +42,13 @@ static const struct common_glue_ctx serpent_enc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 16,
|
.num_blocks = 16,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_16way) }
|
.fn_u = { .ecb = serpent_ecb_enc_16way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 8,
|
.num_blocks = 8,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
|
.fn_u = { .ecb = serpent_ecb_enc_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
|
.fn_u = { .ecb = __serpent_encrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -60,13 +58,13 @@ static const struct common_glue_ctx serpent_ctr = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 16,
|
.num_blocks = 16,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_16way) }
|
.fn_u = { .ctr = serpent_ctr_16way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 8,
|
.num_blocks = 8,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
|
.fn_u = { .ctr = serpent_ctr_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
|
.fn_u = { .ctr = __serpent_crypt_ctr }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -76,13 +74,13 @@ static const struct common_glue_ctx serpent_enc_xts = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 16,
|
.num_blocks = 16,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_16way) }
|
.fn_u = { .xts = serpent_xts_enc_16way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 8,
|
.num_blocks = 8,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
|
.fn_u = { .xts = serpent_xts_enc_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
|
.fn_u = { .xts = serpent_xts_enc }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -92,13 +90,13 @@ static const struct common_glue_ctx serpent_dec = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 16,
|
.num_blocks = 16,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_16way) }
|
.fn_u = { .ecb = serpent_ecb_dec_16way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 8,
|
.num_blocks = 8,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
|
.fn_u = { .ecb = serpent_ecb_dec_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
|
.fn_u = { .ecb = __serpent_decrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -108,13 +106,13 @@ static const struct common_glue_ctx serpent_dec_cbc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 16,
|
.num_blocks = 16,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_16way) }
|
.fn_u = { .cbc = serpent_cbc_dec_16way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 8,
|
.num_blocks = 8,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
|
.fn_u = { .cbc = serpent_cbc_dec_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
|
.fn_u = { .cbc = __serpent_decrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -124,13 +122,13 @@ static const struct common_glue_ctx serpent_dec_xts = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 16,
|
.num_blocks = 16,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_16way) }
|
.fn_u = { .xts = serpent_xts_dec_16way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 8,
|
.num_blocks = 8,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
|
.fn_u = { .xts = serpent_xts_dec_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
|
.fn_u = { .xts = serpent_xts_dec }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -146,8 +144,7 @@ static int ecb_decrypt(struct skcipher_request *req)
|
|||||||
|
|
||||||
static int cbc_encrypt(struct skcipher_request *req)
|
static int cbc_encrypt(struct skcipher_request *req)
|
||||||
{
|
{
|
||||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
|
return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
|
||||||
req);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cbc_decrypt(struct skcipher_request *req)
|
static int cbc_decrypt(struct skcipher_request *req)
|
||||||
@ -166,8 +163,8 @@ static int xts_encrypt(struct skcipher_request *req)
|
|||||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
|
||||||
return glue_xts_req_128bit(&serpent_enc_xts, req,
|
return glue_xts_req_128bit(&serpent_enc_xts, req,
|
||||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
__serpent_encrypt, &ctx->tweak_ctx,
|
||||||
&ctx->tweak_ctx, &ctx->crypt_ctx, false);
|
&ctx->crypt_ctx, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int xts_decrypt(struct skcipher_request *req)
|
static int xts_decrypt(struct skcipher_request *req)
|
||||||
@ -176,8 +173,8 @@ static int xts_decrypt(struct skcipher_request *req)
|
|||||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
|
||||||
return glue_xts_req_128bit(&serpent_dec_xts, req,
|
return glue_xts_req_128bit(&serpent_dec_xts, req,
|
||||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
__serpent_encrypt, &ctx->tweak_ctx,
|
||||||
&ctx->tweak_ctx, &ctx->crypt_ctx, true);
|
&ctx->crypt_ctx, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct skcipher_alg serpent_algs[] = {
|
static struct skcipher_alg serpent_algs[] = {
|
||||||
|
@ -20,33 +20,35 @@
|
|||||||
#include <asm/crypto/serpent-avx.h>
|
#include <asm/crypto/serpent-avx.h>
|
||||||
|
|
||||||
/* 8-way parallel cipher functions */
|
/* 8-way parallel cipher functions */
|
||||||
asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
|
||||||
const u8 *src);
|
const u8 *src);
|
||||||
EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);
|
EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);
|
||||||
|
|
||||||
asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
|
||||||
const u8 *src);
|
const u8 *src);
|
||||||
EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);
|
EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);
|
||||||
|
|
||||||
asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
|
||||||
const u8 *src);
|
const u8 *src);
|
||||||
EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
|
EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
|
||||||
|
|
||||||
asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
|
||||||
const u8 *src, le128 *iv);
|
le128 *iv);
|
||||||
EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);
|
EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);
|
||||||
|
|
||||||
asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
|
||||||
const u8 *src, le128 *iv);
|
const u8 *src, le128 *iv);
|
||||||
EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);
|
EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);
|
||||||
|
|
||||||
asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
|
||||||
const u8 *src, le128 *iv);
|
const u8 *src, le128 *iv);
|
||||||
EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);
|
EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);
|
||||||
|
|
||||||
void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
|
||||||
{
|
{
|
||||||
be128 ctrblk;
|
be128 ctrblk;
|
||||||
|
u128 *dst = (u128 *)d;
|
||||||
|
const u128 *src = (const u128 *)s;
|
||||||
|
|
||||||
le128_to_be128(&ctrblk, iv);
|
le128_to_be128(&ctrblk, iv);
|
||||||
le128_inc(iv);
|
le128_inc(iv);
|
||||||
@ -56,17 +58,15 @@ void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(__serpent_crypt_ctr);
|
EXPORT_SYMBOL_GPL(__serpent_crypt_ctr);
|
||||||
|
|
||||||
void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
|
||||||
{
|
{
|
||||||
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
|
glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt);
|
||||||
GLUE_FUNC_CAST(__serpent_encrypt));
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(serpent_xts_enc);
|
EXPORT_SYMBOL_GPL(serpent_xts_enc);
|
||||||
|
|
||||||
void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
|
||||||
{
|
{
|
||||||
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
|
glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt);
|
||||||
GLUE_FUNC_CAST(__serpent_decrypt));
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(serpent_xts_dec);
|
EXPORT_SYMBOL_GPL(serpent_xts_dec);
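The serpent hunks here all make the same change: instead of forcing differently-typed helpers into the glue tables with GLUE_FUNC_CAST()-style function-pointer casts, every helper now carries the common (const void *ctx, u8 *dst, const u8 *src, ...) prototype and casts its data pointers internally, as __serpent_crypt_ctr() and serpent_xts_enc()/serpent_xts_dec() now do. A toy illustration of the pattern in plain C, with made-up names rather than the real glue API:

#include <stddef.h>
#include <stdint.h>

/* Every table entry uses this one prototype, so no function-pointer casts
 * (which are undefined behaviour and defeat CFI) are ever needed. */
typedef void (*ecb_fn)(const void *ctx, uint8_t *dst, const uint8_t *src);

struct toy_ctx {
	uint8_t pad[16];
};

/* Wrapper with the common prototype; the narrower types are recovered
 * inside the function instead of by casting the function pointer. */
static void toy_encrypt_one(const void *ctx, uint8_t *dst, const uint8_t *src)
{
	const struct toy_ctx *c = ctx;
	size_t i;

	for (i = 0; i < sizeof(c->pad); i++)	/* stand-in for a real cipher */
		dst[i] = src[i] ^ c->pad[i];
}

static const ecb_fn toy_table[] = { toy_encrypt_one };	/* no cast required */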
|
||||||
|
|
||||||
@ -102,10 +102,10 @@ static const struct common_glue_ctx serpent_enc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
|
.fn_u = { .ecb = serpent_ecb_enc_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
|
.fn_u = { .ecb = __serpent_encrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -115,10 +115,10 @@ static const struct common_glue_ctx serpent_ctr = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
|
.fn_u = { .ctr = serpent_ctr_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
|
.fn_u = { .ctr = __serpent_crypt_ctr }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -128,10 +128,10 @@ static const struct common_glue_ctx serpent_enc_xts = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
|
.fn_u = { .xts = serpent_xts_enc_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
|
.fn_u = { .xts = serpent_xts_enc }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -141,10 +141,10 @@ static const struct common_glue_ctx serpent_dec = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
|
.fn_u = { .ecb = serpent_ecb_dec_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
|
.fn_u = { .ecb = __serpent_decrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -154,10 +154,10 @@ static const struct common_glue_ctx serpent_dec_cbc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
|
.fn_u = { .cbc = serpent_cbc_dec_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
|
.fn_u = { .cbc = __serpent_decrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -167,10 +167,10 @@ static const struct common_glue_ctx serpent_dec_xts = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
|
.fn_u = { .xts = serpent_xts_dec_8way_avx }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
|
.fn_u = { .xts = serpent_xts_dec }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -186,8 +186,7 @@ static int ecb_decrypt(struct skcipher_request *req)
|
|||||||
|
|
||||||
static int cbc_encrypt(struct skcipher_request *req)
|
static int cbc_encrypt(struct skcipher_request *req)
|
||||||
{
|
{
|
||||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
|
return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
|
||||||
req);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cbc_decrypt(struct skcipher_request *req)
|
static int cbc_decrypt(struct skcipher_request *req)
|
||||||
@ -206,8 +205,8 @@ static int xts_encrypt(struct skcipher_request *req)
|
|||||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
|
||||||
return glue_xts_req_128bit(&serpent_enc_xts, req,
|
return glue_xts_req_128bit(&serpent_enc_xts, req,
|
||||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
__serpent_encrypt, &ctx->tweak_ctx,
|
||||||
&ctx->tweak_ctx, &ctx->crypt_ctx, false);
|
&ctx->crypt_ctx, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int xts_decrypt(struct skcipher_request *req)
|
static int xts_decrypt(struct skcipher_request *req)
|
||||||
@ -216,8 +215,8 @@ static int xts_decrypt(struct skcipher_request *req)
|
|||||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
|
||||||
return glue_xts_req_128bit(&serpent_dec_xts, req,
|
return glue_xts_req_128bit(&serpent_dec_xts, req,
|
||||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
__serpent_encrypt, &ctx->tweak_ctx,
|
||||||
&ctx->tweak_ctx, &ctx->crypt_ctx, true);
|
&ctx->crypt_ctx, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct skcipher_alg serpent_algs[] = {
|
static struct skcipher_alg serpent_algs[] = {
|
||||||
|
@ -31,9 +31,11 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
|
|||||||
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
|
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
|
static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s)
|
||||||
{
|
{
|
||||||
u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
|
u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
|
||||||
|
u128 *dst = (u128 *)d;
|
||||||
|
const u128 *src = (const u128 *)s;
|
||||||
unsigned int j;
|
unsigned int j;
|
||||||
|
|
||||||
for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
|
for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
|
||||||
@ -45,9 +47,11 @@ static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
|
|||||||
u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
|
u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
|
||||||
{
|
{
|
||||||
be128 ctrblk;
|
be128 ctrblk;
|
||||||
|
u128 *dst = (u128 *)d;
|
||||||
|
const u128 *src = (const u128 *)s;
|
||||||
|
|
||||||
le128_to_be128(&ctrblk, iv);
|
le128_to_be128(&ctrblk, iv);
|
||||||
le128_inc(iv);
|
le128_inc(iv);
|
||||||
@ -56,10 +60,12 @@ static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
|||||||
u128_xor(dst, src, (u128 *)&ctrblk);
|
u128_xor(dst, src, (u128 *)&ctrblk);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
|
static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s,
|
||||||
le128 *iv)
|
le128 *iv)
|
||||||
{
|
{
|
||||||
be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
|
be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
|
||||||
|
u128 *dst = (u128 *)d;
|
||||||
|
const u128 *src = (const u128 *)s;
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
|
for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
|
||||||
@ -79,10 +85,10 @@ static const struct common_glue_ctx serpent_enc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
|
.fn_u = { .ecb = serpent_enc_blk_xway }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
|
.fn_u = { .ecb = __serpent_encrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -92,10 +98,10 @@ static const struct common_glue_ctx serpent_ctr = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
|
.fn_u = { .ctr = serpent_crypt_ctr_xway }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
|
.fn_u = { .ctr = serpent_crypt_ctr }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -105,10 +111,10 @@ static const struct common_glue_ctx serpent_dec = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
|
.fn_u = { .ecb = serpent_dec_blk_xway }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
|
.fn_u = { .ecb = __serpent_decrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -118,10 +124,10 @@ static const struct common_glue_ctx serpent_dec_cbc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
.num_blocks = SERPENT_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
|
.fn_u = { .cbc = serpent_decrypt_cbc_xway }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
|
.fn_u = { .cbc = __serpent_decrypt }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -137,7 +143,7 @@ static int ecb_decrypt(struct skcipher_request *req)
|
|||||||
|
|
||||||
static int cbc_encrypt(struct skcipher_request *req)
|
static int cbc_encrypt(struct skcipher_request *req)
|
||||||
{
|
{
|
||||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
|
return glue_cbc_encrypt_req_128bit(__serpent_encrypt,
|
||||||
req);
|
req);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -62,11 +62,11 @@
|
|||||||
*Visit http://software.intel.com/en-us/articles/
|
*Visit http://software.intel.com/en-us/articles/
|
||||||
*and refer to improving-the-performance-of-the-secure-hash-algorithm-1/
|
*and refer to improving-the-performance-of-the-secure-hash-algorithm-1/
|
||||||
*
|
*
|
||||||
*Updates 20-byte SHA-1 record in 'hash' for even number of
|
*Updates 20-byte SHA-1 record at start of 'state', from 'input', for
|
||||||
*'num_blocks' consecutive 64-byte blocks
|
*even number of 'blocks' consecutive 64-byte blocks.
|
||||||
*
|
*
|
||||||
*extern "C" void sha1_transform_avx2(
|
*extern "C" void sha1_transform_avx2(
|
||||||
* int *hash, const char* input, size_t num_blocks );
|
* struct sha1_state *state, const u8* input, int blocks );
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/linkage.h>
|
#include <linux/linkage.h>
|
||||||
|
@@ -457,9 +457,13 @@ W_PRECALC_SSSE3
 	movdqu \a,\b
 .endm

-/* SSSE3 optimized implementation:
- * extern "C" void sha1_transform_ssse3(u32 *digest, const char *data, u32 *ws,
- *	unsigned int rounds);
+/*
+ * SSSE3 optimized implementation:
+ *
+ * extern "C" void sha1_transform_ssse3(struct sha1_state *state,
+ *	const u8 *data, int blocks);
+ *
+ * Note that struct sha1_state is assumed to begin with u32 state[5].
  */
 SHA1_VECTOR_ASM sha1_transform_ssse3

@@ -545,8 +549,8 @@ W_PRECALC_AVX


 /* AVX optimized implementation:
- * extern "C" void sha1_transform_avx(u32 *digest, const char *data, u32 *ws,
- *	unsigned int rounds);
+ * extern "C" void sha1_transform_avx(struct sha1_state *state,
+ *	const u8 *data, int blocks);
  */
 SHA1_VECTOR_ASM sha1_transform_avx

@ -27,11 +27,8 @@
|
|||||||
#include <crypto/sha1_base.h>
|
#include <crypto/sha1_base.h>
|
||||||
#include <asm/simd.h>
|
#include <asm/simd.h>
|
||||||
|
|
||||||
typedef void (sha1_transform_fn)(u32 *digest, const char *data,
|
|
||||||
unsigned int rounds);
|
|
||||||
|
|
||||||
static int sha1_update(struct shash_desc *desc, const u8 *data,
|
static int sha1_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, sha1_transform_fn *sha1_xform)
|
unsigned int len, sha1_block_fn *sha1_xform)
|
||||||
{
|
{
|
||||||
struct sha1_state *sctx = shash_desc_ctx(desc);
|
struct sha1_state *sctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
@ -39,48 +36,47 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
|
|||||||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
|
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
|
||||||
return crypto_sha1_update(desc, data, len);
|
return crypto_sha1_update(desc, data, len);
|
||||||
|
|
||||||
/* make sure casting to sha1_block_fn() is safe */
|
/*
|
||||||
|
* Make sure struct sha1_state begins directly with the SHA1
|
||||||
|
* 160-bit internal state, as this is what the asm functions expect.
|
||||||
|
*/
|
||||||
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
|
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
|
||||||
|
|
||||||
kernel_fpu_begin();
|
kernel_fpu_begin();
|
||||||
sha1_base_do_update(desc, data, len,
|
sha1_base_do_update(desc, data, len, sha1_xform);
|
||||||
(sha1_block_fn *)sha1_xform);
|
|
||||||
kernel_fpu_end();
|
kernel_fpu_end();
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_finup(struct shash_desc *desc, const u8 *data,
|
static int sha1_finup(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
|
unsigned int len, u8 *out, sha1_block_fn *sha1_xform)
|
||||||
{
|
{
|
||||||
if (!crypto_simd_usable())
|
if (!crypto_simd_usable())
|
||||||
return crypto_sha1_finup(desc, data, len, out);
|
return crypto_sha1_finup(desc, data, len, out);
|
||||||
|
|
||||||
kernel_fpu_begin();
|
kernel_fpu_begin();
|
||||||
if (len)
|
if (len)
|
||||||
sha1_base_do_update(desc, data, len,
|
sha1_base_do_update(desc, data, len, sha1_xform);
|
||||||
(sha1_block_fn *)sha1_xform);
|
sha1_base_do_finalize(desc, sha1_xform);
|
||||||
sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_xform);
|
|
||||||
kernel_fpu_end();
|
kernel_fpu_end();
|
||||||
|
|
||||||
return sha1_base_finish(desc, out);
|
return sha1_base_finish(desc, out);
|
||||||
}
|
}
|
||||||
|
|
||||||
asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
|
asmlinkage void sha1_transform_ssse3(struct sha1_state *state,
|
||||||
unsigned int rounds);
|
const u8 *data, int blocks);
|
||||||
|
|
||||||
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
|
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
{
|
{
|
||||||
return sha1_update(desc, data, len,
|
return sha1_update(desc, data, len, sha1_transform_ssse3);
|
||||||
(sha1_transform_fn *) sha1_transform_ssse3);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
|
static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, u8 *out)
|
unsigned int len, u8 *out)
|
||||||
{
|
{
|
||||||
return sha1_finup(desc, data, len, out,
|
return sha1_finup(desc, data, len, out, sha1_transform_ssse3);
|
||||||
(sha1_transform_fn *) sha1_transform_ssse3);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Add padding and return the message digest. */
|
/* Add padding and return the message digest. */
|
||||||
@ -119,21 +115,19 @@ static void unregister_sha1_ssse3(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_AS_AVX
|
#ifdef CONFIG_AS_AVX
|
||||||
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
|
asmlinkage void sha1_transform_avx(struct sha1_state *state,
|
||||||
unsigned int rounds);
|
const u8 *data, int blocks);
|
||||||
|
|
||||||
static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
|
static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
{
|
{
|
||||||
return sha1_update(desc, data, len,
|
return sha1_update(desc, data, len, sha1_transform_avx);
|
||||||
(sha1_transform_fn *) sha1_transform_avx);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
|
static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, u8 *out)
|
unsigned int len, u8 *out)
|
||||||
{
|
{
|
||||||
return sha1_finup(desc, data, len, out,
|
return sha1_finup(desc, data, len, out, sha1_transform_avx);
|
||||||
(sha1_transform_fn *) sha1_transform_avx);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_avx_final(struct shash_desc *desc, u8 *out)
|
static int sha1_avx_final(struct shash_desc *desc, u8 *out)
|
||||||
@ -190,8 +184,8 @@ static inline void unregister_sha1_avx(void) { }
|
|||||||
#if defined(CONFIG_AS_AVX2) && (CONFIG_AS_AVX)
|
#if defined(CONFIG_AS_AVX2) && (CONFIG_AS_AVX)
|
||||||
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */
|
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */
|
||||||
|
|
||||||
asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
|
asmlinkage void sha1_transform_avx2(struct sha1_state *state,
|
||||||
unsigned int rounds);
|
const u8 *data, int blocks);
|
||||||
|
|
||||||
static bool avx2_usable(void)
|
static bool avx2_usable(void)
|
||||||
{
|
{
|
||||||
@ -203,28 +197,26 @@ static bool avx2_usable(void)
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void sha1_apply_transform_avx2(u32 *digest, const char *data,
|
static void sha1_apply_transform_avx2(struct sha1_state *state,
|
||||||
unsigned int rounds)
|
const u8 *data, int blocks)
|
||||||
{
|
{
|
||||||
/* Select the optimal transform based on data block size */
|
/* Select the optimal transform based on data block size */
|
||||||
if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE)
|
if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE)
|
||||||
sha1_transform_avx2(digest, data, rounds);
|
sha1_transform_avx2(state, data, blocks);
|
||||||
else
|
else
|
||||||
sha1_transform_avx(digest, data, rounds);
|
sha1_transform_avx(state, data, blocks);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_avx2_update(struct shash_desc *desc, const u8 *data,
|
static int sha1_avx2_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
{
|
{
|
||||||
return sha1_update(desc, data, len,
|
return sha1_update(desc, data, len, sha1_apply_transform_avx2);
|
||||||
(sha1_transform_fn *) sha1_apply_transform_avx2);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data,
|
static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, u8 *out)
|
unsigned int len, u8 *out)
|
||||||
{
|
{
|
||||||
return sha1_finup(desc, data, len, out,
|
return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2);
|
||||||
(sha1_transform_fn *) sha1_apply_transform_avx2);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_avx2_final(struct shash_desc *desc, u8 *out)
|
static int sha1_avx2_final(struct shash_desc *desc, u8 *out)
|
||||||
@ -267,21 +259,19 @@ static inline void unregister_sha1_avx2(void) { }
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_AS_SHA1_NI
|
#ifdef CONFIG_AS_SHA1_NI
|
||||||
asmlinkage void sha1_ni_transform(u32 *digest, const char *data,
|
asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
|
||||||
unsigned int rounds);
|
int rounds);
|
||||||
|
|
||||||
static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
|
static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
{
|
{
|
||||||
return sha1_update(desc, data, len,
|
return sha1_update(desc, data, len, sha1_ni_transform);
|
||||||
(sha1_transform_fn *) sha1_ni_transform);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
|
static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, u8 *out)
|
unsigned int len, u8 *out)
|
||||||
{
|
{
|
||||||
return sha1_finup(desc, data, len, out,
|
return sha1_finup(desc, data, len, out, sha1_ni_transform);
|
||||||
(sha1_transform_fn *) sha1_ni_transform);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha1_ni_final(struct shash_desc *desc, u8 *out)
|
static int sha1_ni_final(struct shash_desc *desc, u8 *out)
|
||||||
|
@ -341,8 +341,8 @@ a = TMP_
|
|||||||
.endm
|
.endm
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
## void sha256_transform_avx(void *input_data, UINT32 digest[8], UINT64 num_blks)
|
## void sha256_transform_avx(state sha256_state *state, const u8 *data, int blocks)
|
||||||
## arg 1 : pointer to digest
|
## arg 1 : pointer to state
|
||||||
## arg 2 : pointer to input data
|
## arg 2 : pointer to input data
|
||||||
## arg 3 : Num blocks
|
## arg 3 : Num blocks
|
||||||
########################################################################
|
########################################################################
|
||||||
|
@ -520,8 +520,8 @@ STACK_SIZE = _RSP + _RSP_SIZE
|
|||||||
.endm
|
.endm
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
## void sha256_transform_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks)
|
## void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks)
|
||||||
## arg 1 : pointer to digest
|
## arg 1 : pointer to state
|
||||||
## arg 2 : pointer to input data
|
## arg 2 : pointer to input data
|
||||||
## arg 3 : Num blocks
|
## arg 3 : Num blocks
|
||||||
########################################################################
|
########################################################################
|
||||||
|
@ -347,8 +347,10 @@ a = TMP_
|
|||||||
.endm
|
.endm
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
## void sha256_transform_ssse3(void *input_data, UINT32 digest[8], UINT64 num_blks)
|
## void sha256_transform_ssse3(struct sha256_state *state, const u8 *data,
|
||||||
## arg 1 : pointer to digest
|
## int blocks);
|
||||||
|
## arg 1 : pointer to state
|
||||||
|
## (struct sha256_state is assumed to begin with u32 state[8])
|
||||||
## arg 2 : pointer to input data
|
## arg 2 : pointer to input data
|
||||||
## arg 3 : Num blocks
|
## arg 3 : Num blocks
|
||||||
########################################################################
|
########################################################################
|
||||||
|
@ -41,12 +41,11 @@
|
|||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <asm/simd.h>
|
#include <asm/simd.h>
|
||||||
|
|
||||||
asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
|
asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
|
||||||
u64 rounds);
|
const u8 *data, int blocks);
|
||||||
typedef void (sha256_transform_fn)(u32 *digest, const char *data, u64 rounds);
|
|
||||||
|
|
||||||
static int _sha256_update(struct shash_desc *desc, const u8 *data,
|
static int _sha256_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, sha256_transform_fn *sha256_xform)
|
unsigned int len, sha256_block_fn *sha256_xform)
|
||||||
{
|
{
|
||||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
@ -54,28 +53,29 @@ static int _sha256_update(struct shash_desc *desc, const u8 *data,
|
|||||||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
|
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
|
||||||
return crypto_sha256_update(desc, data, len);
|
return crypto_sha256_update(desc, data, len);
|
||||||
|
|
||||||
/* make sure casting to sha256_block_fn() is safe */
|
/*
|
||||||
|
* Make sure struct sha256_state begins directly with the SHA256
|
||||||
|
* 256-bit internal state, as this is what the asm functions expect.
|
||||||
|
*/
|
||||||
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
|
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
|
||||||
|
|
||||||
kernel_fpu_begin();
|
kernel_fpu_begin();
|
||||||
sha256_base_do_update(desc, data, len,
|
sha256_base_do_update(desc, data, len, sha256_xform);
|
||||||
(sha256_block_fn *)sha256_xform);
|
|
||||||
kernel_fpu_end();
|
kernel_fpu_end();
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha256_finup(struct shash_desc *desc, const u8 *data,
|
static int sha256_finup(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, u8 *out, sha256_transform_fn *sha256_xform)
|
unsigned int len, u8 *out, sha256_block_fn *sha256_xform)
|
||||||
{
|
{
|
||||||
if (!crypto_simd_usable())
|
if (!crypto_simd_usable())
|
||||||
return crypto_sha256_finup(desc, data, len, out);
|
return crypto_sha256_finup(desc, data, len, out);
|
||||||
|
|
||||||
kernel_fpu_begin();
|
kernel_fpu_begin();
|
||||||
if (len)
|
if (len)
|
||||||
sha256_base_do_update(desc, data, len,
|
sha256_base_do_update(desc, data, len, sha256_xform);
|
||||||
(sha256_block_fn *)sha256_xform);
|
sha256_base_do_finalize(desc, sha256_xform);
|
||||||
sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_xform);
|
|
||||||
kernel_fpu_end();
|
kernel_fpu_end();
|
||||||
|
|
||||||
return sha256_base_finish(desc, out);
|
return sha256_base_finish(desc, out);
|
||||||
@ -145,8 +145,8 @@ static void unregister_sha256_ssse3(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_AS_AVX
|
#ifdef CONFIG_AS_AVX
|
||||||
asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
|
asmlinkage void sha256_transform_avx(struct sha256_state *state,
|
||||||
u64 rounds);
|
const u8 *data, int blocks);
|
||||||
|
|
||||||
static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
|
static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
@ -227,8 +227,8 @@ static inline void unregister_sha256_avx(void) { }
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
|
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
|
||||||
asmlinkage void sha256_transform_rorx(u32 *digest, const char *data,
|
asmlinkage void sha256_transform_rorx(struct sha256_state *state,
|
||||||
u64 rounds);
|
const u8 *data, int blocks);
|
||||||
|
|
||||||
static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
|
static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
@ -307,8 +307,8 @@ static inline void unregister_sha256_avx2(void) { }
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_AS_SHA256_NI
|
#ifdef CONFIG_AS_SHA256_NI
|
||||||
asmlinkage void sha256_ni_transform(u32 *digest, const char *data,
|
asmlinkage void sha256_ni_transform(struct sha256_state *digest,
|
||||||
u64 rounds); /*unsigned int rounds);*/
|
const u8 *data, int rounds);
|
||||||
|
|
||||||
static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
|
static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
|
@ -271,11 +271,12 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
|
|||||||
.endm
|
.endm
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
# void sha512_transform_avx(void* D, const void* M, u64 L)
|
# void sha512_transform_avx(sha512_state *state, const u8 *data, int blocks)
|
||||||
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
|
# Purpose: Updates the SHA512 digest stored at "state" with the message
|
||||||
# The size of the message pointed to by M must be an integer multiple of SHA512
|
# stored in "data".
|
||||||
# message blocks.
|
# The size of the message pointed to by "data" must be an integer multiple
|
||||||
# L is the message length in SHA512 blocks
|
# of SHA512 message blocks.
|
||||||
|
# "blocks" is the message length in SHA512 blocks
|
||||||
########################################################################
|
########################################################################
|
||||||
SYM_FUNC_START(sha512_transform_avx)
|
SYM_FUNC_START(sha512_transform_avx)
|
||||||
cmp $0, msglen
|
cmp $0, msglen
|
||||||
|
@ -563,11 +563,12 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
|
|||||||
.endm
|
.endm
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
# void sha512_transform_rorx(void* D, const void* M, uint64_t L)#
|
# void sha512_transform_rorx(sha512_state *state, const u8 *data, int blocks)
|
||||||
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
|
# Purpose: Updates the SHA512 digest stored at "state" with the message
|
||||||
# The size of the message pointed to by M must be an integer multiple of SHA512
|
# stored in "data".
|
||||||
# message blocks.
|
# The size of the message pointed to by "data" must be an integer multiple
|
||||||
# L is the message length in SHA512 blocks
|
# of SHA512 message blocks.
|
||||||
|
# "blocks" is the message length in SHA512 blocks
|
||||||
########################################################################
|
########################################################################
|
||||||
SYM_FUNC_START(sha512_transform_rorx)
|
SYM_FUNC_START(sha512_transform_rorx)
|
||||||
# Allocate Stack Space
|
# Allocate Stack Space
|
||||||
|
@ -269,11 +269,14 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
|
|||||||
.endm
|
.endm
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
# void sha512_transform_ssse3(void* D, const void* M, u64 L)#
|
## void sha512_transform_ssse3(struct sha512_state *state, const u8 *data,
|
||||||
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
|
## int blocks);
|
||||||
# The size of the message pointed to by M must be an integer multiple of SHA512
|
# (struct sha512_state is assumed to begin with u64 state[8])
|
||||||
# message blocks.
|
# Purpose: Updates the SHA512 digest stored at "state" with the message
|
||||||
# L is the message length in SHA512 blocks.
|
# stored in "data".
|
||||||
|
# The size of the message pointed to by "data" must be an integer multiple
|
||||||
|
# of SHA512 message blocks.
|
||||||
|
# "blocks" is the message length in SHA512 blocks.
|
||||||
########################################################################
|
########################################################################
|
||||||
SYM_FUNC_START(sha512_transform_ssse3)
|
SYM_FUNC_START(sha512_transform_ssse3)
|
||||||
|
|
||||||
|
@ -39,13 +39,11 @@
|
|||||||
#include <crypto/sha512_base.h>
|
#include <crypto/sha512_base.h>
|
||||||
#include <asm/simd.h>
|
#include <asm/simd.h>
|
||||||
|
|
||||||
asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
|
asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
|
||||||
u64 rounds);
|
const u8 *data, int blocks);
|
||||||
|
|
||||||
typedef void (sha512_transform_fn)(u64 *digest, const char *data, u64 rounds);
|
|
||||||
|
|
||||||
static int sha512_update(struct shash_desc *desc, const u8 *data,
|
static int sha512_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, sha512_transform_fn *sha512_xform)
|
unsigned int len, sha512_block_fn *sha512_xform)
|
||||||
{
|
{
|
||||||
struct sha512_state *sctx = shash_desc_ctx(desc);
|
struct sha512_state *sctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
@ -53,28 +51,29 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
|
|||||||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
|
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
|
||||||
return crypto_sha512_update(desc, data, len);
|
return crypto_sha512_update(desc, data, len);
|
||||||
|
|
||||||
/* make sure casting to sha512_block_fn() is safe */
|
/*
|
||||||
|
* Make sure struct sha512_state begins directly with the SHA512
|
||||||
|
* 512-bit internal state, as this is what the asm functions expect.
|
||||||
|
*/
|
||||||
BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
|
BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
|
||||||
|
|
||||||
kernel_fpu_begin();
|
kernel_fpu_begin();
|
||||||
sha512_base_do_update(desc, data, len,
|
sha512_base_do_update(desc, data, len, sha512_xform);
|
||||||
(sha512_block_fn *)sha512_xform);
|
|
||||||
kernel_fpu_end();
|
kernel_fpu_end();
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sha512_finup(struct shash_desc *desc, const u8 *data,
|
static int sha512_finup(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len, u8 *out, sha512_transform_fn *sha512_xform)
|
unsigned int len, u8 *out, sha512_block_fn *sha512_xform)
|
||||||
{
|
{
|
||||||
if (!crypto_simd_usable())
|
if (!crypto_simd_usable())
|
||||||
return crypto_sha512_finup(desc, data, len, out);
|
return crypto_sha512_finup(desc, data, len, out);
|
||||||
|
|
||||||
kernel_fpu_begin();
|
kernel_fpu_begin();
|
||||||
if (len)
|
if (len)
|
||||||
sha512_base_do_update(desc, data, len,
|
sha512_base_do_update(desc, data, len, sha512_xform);
|
||||||
(sha512_block_fn *)sha512_xform);
|
sha512_base_do_finalize(desc, sha512_xform);
|
||||||
sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_xform);
|
|
||||||
kernel_fpu_end();
|
kernel_fpu_end();
|
||||||
|
|
||||||
return sha512_base_finish(desc, out);
|
return sha512_base_finish(desc, out);
|
||||||
@ -144,8 +143,8 @@ static void unregister_sha512_ssse3(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_AS_AVX
|
#ifdef CONFIG_AS_AVX
|
||||||
asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
|
asmlinkage void sha512_transform_avx(struct sha512_state *state,
|
||||||
u64 rounds);
|
const u8 *data, int blocks);
|
||||||
static bool avx_usable(void)
|
static bool avx_usable(void)
|
||||||
{
|
{
|
||||||
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
|
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
|
||||||
@ -225,8 +224,8 @@ static inline void unregister_sha512_avx(void) { }
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
|
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
|
||||||
asmlinkage void sha512_transform_rorx(u64 *digest, const char *data,
|
asmlinkage void sha512_transform_rorx(struct sha512_state *state,
|
||||||
u64 rounds);
|
const u8 *data, int blocks);
|
||||||
|
|
||||||
static int sha512_avx2_update(struct shash_desc *desc, const u8 *data,
|
static int sha512_avx2_update(struct shash_desc *desc, const u8 *data,
|
||||||
unsigned int len)
|
unsigned int len)
|
||||||
|
@ -22,20 +22,17 @@
|
|||||||
#define TWOFISH_PARALLEL_BLOCKS 8
|
#define TWOFISH_PARALLEL_BLOCKS 8
|
||||||
|
|
||||||
/* 8-way parallel cipher functions */
|
/* 8-way parallel cipher functions */
|
||||||
asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst,
|
asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
const u8 *src);
|
asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst,
|
|
||||||
const u8 *src);
|
|
||||||
|
|
||||||
asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst,
|
asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
const u8 *src);
|
asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst,
|
le128 *iv);
|
||||||
const u8 *src, le128 *iv);
|
|
||||||
|
|
||||||
asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
|
asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
const u8 *src, le128 *iv);
|
le128 *iv);
|
||||||
asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
|
asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
const u8 *src, le128 *iv);
|
le128 *iv);
|
||||||
|
|
||||||
static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
|
static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||||
const u8 *key, unsigned int keylen)
|
const u8 *key, unsigned int keylen)
|
||||||
@ -43,22 +40,19 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
|
|||||||
return twofish_setkey(&tfm->base, key, keylen);
|
return twofish_setkey(&tfm->base, key, keylen);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
|
static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
|
||||||
const u8 *src)
|
|
||||||
{
|
{
|
||||||
__twofish_enc_blk_3way(ctx, dst, src, false);
|
__twofish_enc_blk_3way(ctx, dst, src, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
|
||||||
{
|
{
|
||||||
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
|
glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk);
|
||||||
GLUE_FUNC_CAST(twofish_enc_blk));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
|
||||||
{
|
{
|
||||||
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
|
glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk);
|
||||||
GLUE_FUNC_CAST(twofish_dec_blk));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
struct twofish_xts_ctx {
|
struct twofish_xts_ctx {
|
||||||
@ -70,7 +64,6 @@ static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
|||||||
unsigned int keylen)
|
unsigned int keylen)
|
||||||
{
|
{
|
||||||
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
u32 *flags = &tfm->base.crt_flags;
|
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = xts_verify_key(tfm, key, keylen);
|
err = xts_verify_key(tfm, key, keylen);
|
||||||
@ -78,13 +71,12 @@ static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
|||||||
return err;
|
return err;
|
||||||
|
|
||||||
/* first half of xts-key is for crypt */
|
/* first half of xts-key is for crypt */
|
||||||
err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
|
err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
/* second half of xts-key is for tweak */
|
/* second half of xts-key is for tweak */
|
||||||
return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
|
return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
|
||||||
flags);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct common_glue_ctx twofish_enc = {
|
static const struct common_glue_ctx twofish_enc = {
|
||||||
@ -93,13 +85,13 @@ static const struct common_glue_ctx twofish_enc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) }
|
.fn_u = { .ecb = twofish_ecb_enc_8way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 3,
|
.num_blocks = 3,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
|
.fn_u = { .ecb = twofish_enc_blk_3way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
|
.fn_u = { .ecb = twofish_enc_blk }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -109,13 +101,13 @@ static const struct common_glue_ctx twofish_ctr = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) }
|
.fn_u = { .ctr = twofish_ctr_8way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 3,
|
.num_blocks = 3,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
|
.fn_u = { .ctr = twofish_enc_blk_ctr_3way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) }
|
.fn_u = { .ctr = twofish_enc_blk_ctr }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -125,10 +117,10 @@ static const struct common_glue_ctx twofish_enc_xts = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) }
|
.fn_u = { .xts = twofish_xts_enc_8way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) }
|
.fn_u = { .xts = twofish_xts_enc }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -138,13 +130,13 @@ static const struct common_glue_ctx twofish_dec = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) }
|
.fn_u = { .ecb = twofish_ecb_dec_8way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 3,
|
.num_blocks = 3,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
|
.fn_u = { .ecb = twofish_dec_blk_3way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
|
.fn_u = { .ecb = twofish_dec_blk }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -154,13 +146,13 @@ static const struct common_glue_ctx twofish_dec_cbc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) }
|
.fn_u = { .cbc = twofish_cbc_dec_8way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 3,
|
.num_blocks = 3,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
|
.fn_u = { .cbc = twofish_dec_blk_cbc_3way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
|
.fn_u = { .cbc = twofish_dec_blk }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -170,10 +162,10 @@ static const struct common_glue_ctx twofish_dec_xts = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) }
|
.fn_u = { .xts = twofish_xts_dec_8way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) }
|
.fn_u = { .xts = twofish_xts_dec }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -189,8 +181,7 @@ static int ecb_decrypt(struct skcipher_request *req)
|
|||||||
|
|
||||||
static int cbc_encrypt(struct skcipher_request *req)
|
static int cbc_encrypt(struct skcipher_request *req)
|
||||||
{
|
{
|
||||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
|
return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
|
||||||
req);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cbc_decrypt(struct skcipher_request *req)
|
static int cbc_decrypt(struct skcipher_request *req)
|
||||||
@ -208,8 +199,7 @@ static int xts_encrypt(struct skcipher_request *req)
|
|||||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||||
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
|
||||||
return glue_xts_req_128bit(&twofish_enc_xts, req,
|
return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk,
|
||||||
XTS_TWEAK_CAST(twofish_enc_blk),
|
|
||||||
&ctx->tweak_ctx, &ctx->crypt_ctx, false);
|
&ctx->tweak_ctx, &ctx->crypt_ctx, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -218,8 +208,7 @@ static int xts_decrypt(struct skcipher_request *req)
|
|||||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||||
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
|
||||||
return glue_xts_req_128bit(&twofish_dec_xts, req,
|
return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk,
|
||||||
XTS_TWEAK_CAST(twofish_enc_blk),
|
|
||||||
&ctx->tweak_ctx, &ctx->crypt_ctx, true);
|
&ctx->tweak_ctx, &ctx->crypt_ctx, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -25,21 +25,22 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
|
|||||||
return twofish_setkey(&tfm->base, key, keylen);
|
return twofish_setkey(&tfm->base, key, keylen);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
|
static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
|
||||||
const u8 *src)
|
|
||||||
{
|
{
|
||||||
__twofish_enc_blk_3way(ctx, dst, src, false);
|
__twofish_enc_blk_3way(ctx, dst, src, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
|
static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst,
|
||||||
const u8 *src)
|
const u8 *src)
|
||||||
{
|
{
|
||||||
__twofish_enc_blk_3way(ctx, dst, src, true);
|
__twofish_enc_blk_3way(ctx, dst, src, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src)
|
void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s)
|
||||||
{
|
{
|
||||||
u128 ivs[2];
|
u128 ivs[2];
|
||||||
|
u128 *dst = (u128 *)d;
|
||||||
|
const u128 *src = (const u128 *)s;
|
||||||
|
|
||||||
ivs[0] = src[0];
|
ivs[0] = src[0];
|
||||||
ivs[1] = src[1];
|
ivs[1] = src[1];
|
||||||
@ -51,9 +52,11 @@ void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way);
|
EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way);
|
||||||
|
|
||||||
void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
|
||||||
{
|
{
|
||||||
be128 ctrblk;
|
be128 ctrblk;
|
||||||
|
u128 *dst = (u128 *)d;
|
||||||
|
const u128 *src = (const u128 *)s;
|
||||||
|
|
||||||
if (dst != src)
|
if (dst != src)
|
||||||
*dst = *src;
|
*dst = *src;
|
||||||
@ -66,10 +69,11 @@ void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr);
|
EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr);
|
||||||
|
|
||||||
void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
|
void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
|
||||||
le128 *iv)
|
|
||||||
{
|
{
|
||||||
be128 ctrblks[3];
|
be128 ctrblks[3];
|
||||||
|
u128 *dst = (u128 *)d;
|
||||||
|
const u128 *src = (const u128 *)s;
|
||||||
|
|
||||||
if (dst != src) {
|
if (dst != src) {
|
||||||
dst[0] = src[0];
|
dst[0] = src[0];
|
||||||
@ -94,10 +98,10 @@ static const struct common_glue_ctx twofish_enc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 3,
|
.num_blocks = 3,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
|
.fn_u = { .ecb = twofish_enc_blk_3way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
|
.fn_u = { .ecb = twofish_enc_blk }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -107,10 +111,10 @@ static const struct common_glue_ctx twofish_ctr = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 3,
|
.num_blocks = 3,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) }
|
.fn_u = { .ctr = twofish_enc_blk_ctr_3way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) }
|
.fn_u = { .ctr = twofish_enc_blk_ctr }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -120,10 +124,10 @@ static const struct common_glue_ctx twofish_dec = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 3,
|
.num_blocks = 3,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
|
.fn_u = { .ecb = twofish_dec_blk_3way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
|
.fn_u = { .ecb = twofish_dec_blk }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -133,10 +137,10 @@ static const struct common_glue_ctx twofish_dec_cbc = {
|
|||||||
|
|
||||||
.funcs = { {
|
.funcs = { {
|
||||||
.num_blocks = 3,
|
.num_blocks = 3,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
|
.fn_u = { .cbc = twofish_dec_blk_cbc_3way }
|
||||||
}, {
|
}, {
|
||||||
.num_blocks = 1,
|
.num_blocks = 1,
|
||||||
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
|
.fn_u = { .cbc = twofish_dec_blk }
|
||||||
} }
|
} }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -152,8 +156,7 @@ static int ecb_decrypt(struct skcipher_request *req)
|
|||||||
|
|
||||||
static int cbc_encrypt(struct skcipher_request *req)
|
static int cbc_encrypt(struct skcipher_request *req)
|
||||||
{
|
{
|
||||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
|
return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
|
||||||
req);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cbc_decrypt(struct skcipher_request *req)
|
static int cbc_decrypt(struct skcipher_request *req)
|
||||||
|
@ -26,71 +26,66 @@ struct camellia_xts_ctx {
|
|||||||
|
|
||||||
extern int __camellia_setkey(struct camellia_ctx *cctx,
|
extern int __camellia_setkey(struct camellia_ctx *cctx,
|
||||||
const unsigned char *key,
|
const unsigned char *key,
|
||||||
unsigned int key_len, u32 *flags);
|
unsigned int key_len);
|
||||||
|
|
||||||
extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||||
unsigned int keylen);
|
unsigned int keylen);
|
||||||
|
|
||||||
/* regular block cipher functions */
|
/* regular block cipher functions */
|
||||||
asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
|
asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
|
||||||
const u8 *src, bool xor);
|
bool xor);
|
||||||
asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
|
asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
|
||||||
const u8 *src);
|
|
||||||
|
|
||||||
/* 2-way parallel cipher functions */
|
/* 2-way parallel cipher functions */
|
||||||
asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
|
asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
const u8 *src, bool xor);
|
bool xor);
|
||||||
asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
|
asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
const u8 *src);
|
|
||||||
|
|
||||||
/* 16-way parallel cipher functions (avx/aes-ni) */
|
/* 16-way parallel cipher functions (avx/aes-ni) */
|
||||||
asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
|
asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
const u8 *src);
|
asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
|
|
||||||
const u8 *src);
|
|
||||||
|
|
||||||
asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
|
asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
const u8 *src);
|
asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
|
le128 *iv);
|
||||||
const u8 *src, le128 *iv);
|
|
||||||
|
|
||||||
asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
|
asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
const u8 *src, le128 *iv);
|
le128 *iv);
|
||||||
asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
|
asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
const u8 *src, le128 *iv);
|
le128 *iv);
|
||||||
|
|
||||||
static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
|
static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src)
|
||||||
const u8 *src)
|
|
||||||
{
|
{
|
||||||
__camellia_enc_blk(ctx, dst, src, false);
|
__camellia_enc_blk(ctx, dst, src, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
|
static inline void camellia_enc_blk_xor(const void *ctx, u8 *dst, const u8 *src)
|
||||||
const u8 *src)
|
|
||||||
{
|
{
|
||||||
__camellia_enc_blk(ctx, dst, src, true);
|
__camellia_enc_blk(ctx, dst, src, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
|
static inline void camellia_enc_blk_2way(const void *ctx, u8 *dst,
|
||||||
const u8 *src)
|
const u8 *src)
|
||||||
{
|
{
|
||||||
__camellia_enc_blk_2way(ctx, dst, src, false);
|
__camellia_enc_blk_2way(ctx, dst, src, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
|
static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst,
|
||||||
const u8 *src)
|
const u8 *src)
|
||||||
{
|
{
|
||||||
__camellia_enc_blk_2way(ctx, dst, src, true);
|
__camellia_enc_blk_2way(ctx, dst, src, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* glue helpers */
|
/* glue helpers */
|
||||||
extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
|
extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src);
|
||||||
extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
|
extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
|
||||||
le128 *iv);
|
le128 *iv);
|
||||||
extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
|
extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src,
|
||||||
le128 *iv);
|
le128 *iv);
|
||||||
|
|
||||||
extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
|
extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src,
|
||||||
extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
|
le128 *iv);
|
||||||
|
extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src,
|
||||||
|
le128 *iv);
|
||||||
|
|
||||||
#endif /* ASM_X86_CAMELLIA_H */
|
#endif /* ASM_X86_CAMELLIA_H */
|
||||||
|
@@ -11,18 +11,13 @@
 #include <asm/fpu/api.h>
 #include <crypto/b128ops.h>

-typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
-typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
+typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src,
 				       le128 *iv);
-typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
+typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src,
 				       le128 *iv);

-#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
-#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
-#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
-#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))
-
 struct common_glue_func_entry {
 	unsigned int num_blocks; /* number of blocks that @fn will process */
 	union {
@@ -116,7 +111,8 @@ extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
 			       common_glue_func_t tweak_fn, void *tweak_ctx,
 			       void *crypt_ctx, bool decrypt);

-extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
-				      le128 *iv, common_glue_func_t fn);
+extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst,
+				      const u8 *src, le128 *iv,
+				      common_glue_func_t fn);

 #endif /* _CRYPTO_GLUE_HELPER_H */

@ -15,26 +15,26 @@ struct serpent_xts_ctx {
|
|||||||
struct serpent_ctx crypt_ctx;
|
struct serpent_ctx crypt_ctx;
|
||||||
};
|
};
|
||||||
|
|
||||||
asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
|
||||||
const u8 *src);
|
const u8 *src);
|
||||||
asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
|
||||||
const u8 *src);
|
const u8 *src);
|
||||||
|
|
||||||
asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
|
||||||
const u8 *src);
|
const u8 *src);
|
||||||
asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
|
||||||
const u8 *src, le128 *iv);
|
|
||||||
|
|
||||||
asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
|
||||||
const u8 *src, le128 *iv);
|
|
||||||
asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
|
||||||
const u8 *src, le128 *iv);
|
|
||||||
|
|
||||||
extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
|
|
||||||
le128 *iv);
|
le128 *iv);
|
||||||
|
|
||||||
extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
|
asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
|
||||||
extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
|
const u8 *src, le128 *iv);
|
||||||
|
asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
|
||||||
|
const u8 *src, le128 *iv);
|
||||||
|
|
||||||
|
extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
|
||||||
|
le128 *iv);
|
||||||
|
|
||||||
|
extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
|
||||||
|
extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
|
||||||
|
|
||||||
extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||||
unsigned int keylen);
|
unsigned int keylen);
|
||||||
|
@ -9,25 +9,23 @@
|
|||||||
|
|
||||||
#define SERPENT_PARALLEL_BLOCKS 4
|
#define SERPENT_PARALLEL_BLOCKS 4
|
||||||
|
|
||||||
asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx, u8 *dst,
|
||||||
const u8 *src, bool xor);
|
const u8 *src, bool xor);
|
||||||
asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx, u8 *dst,
|
||||||
const u8 *src);
|
const u8 *src);
|
||||||
|
|
||||||
static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
|
static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src)
|
||||||
const u8 *src)
|
|
||||||
{
|
{
|
||||||
__serpent_enc_blk_4way(ctx, dst, src, false);
|
__serpent_enc_blk_4way(ctx, dst, src, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
|
static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx,
|
||||||
const u8 *src)
|
u8 *dst, const u8 *src)
|
||||||
{
|
{
|
||||||
__serpent_enc_blk_4way(ctx, dst, src, true);
|
__serpent_enc_blk_4way(ctx, dst, src, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
|
static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src)
|
||||||
const u8 *src)
|
|
||||||
{
|
{
|
||||||
serpent_dec_blk_4way(ctx, dst, src);
|
serpent_dec_blk_4way(ctx, dst, src);
|
||||||
}
|
}
|
||||||
@ -36,25 +34,23 @@ static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
|
|||||||
|
|
||||||
#define SERPENT_PARALLEL_BLOCKS 8
|
#define SERPENT_PARALLEL_BLOCKS 8
|
||||||
|
|
||||||
asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void __serpent_enc_blk_8way(const struct serpent_ctx *ctx, u8 *dst,
|
||||||
const u8 *src, bool xor);
|
const u8 *src, bool xor);
|
||||||
asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
|
asmlinkage void serpent_dec_blk_8way(const struct serpent_ctx *ctx, u8 *dst,
|
||||||
const u8 *src);
|
const u8 *src);
|
||||||
|
|
||||||
static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
|
static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src)
|
||||||
const u8 *src)
|
|
||||||
{
|
{
|
||||||
__serpent_enc_blk_8way(ctx, dst, src, false);
|
__serpent_enc_blk_8way(ctx, dst, src, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
|
static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx,
|
||||||
const u8 *src)
|
u8 *dst, const u8 *src)
|
||||||
{
|
{
|
||||||
__serpent_enc_blk_8way(ctx, dst, src, true);
|
__serpent_enc_blk_8way(ctx, dst, src, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
|
static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src)
|
||||||
const u8 *src)
|
|
||||||
{
|
{
|
||||||
serpent_dec_blk_8way(ctx, dst, src);
|
serpent_dec_blk_8way(ctx, dst, src);
|
||||||
}
|
}
|
||||||
|
@@ -7,22 +7,19 @@
 #include <crypto/b128ops.h>

 /* regular block cipher functions from twofish_x86_64 module */
-asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
-				const u8 *src);
-asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
-				const u8 *src);
+asmlinkage void twofish_enc_blk(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void twofish_dec_blk(const void *ctx, u8 *dst, const u8 *src);

 /* 3-way parallel cipher functions */
-asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
-				       const u8 *src, bool xor);
-asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
-				     const u8 *src);
+asmlinkage void __twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src,
+				       bool xor);
+asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src);

 /* helpers from twofish_x86_64-3way module */
-extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
-extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
+extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src);
+extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src,
 				le128 *iv);
-extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src,
 				     le128 *iv);

 #endif /* ASM_X86_TWOFISH_H */

@@ -511,10 +511,10 @@ config CRYPTO_ESSIV
	  encryption.

	  This driver implements a crypto API template that can be
-	  instantiated either as a skcipher or as a aead (depending on the
+	  instantiated either as an skcipher or as an AEAD (depending on the
	  type of the first template argument), and which defers encryption
	  and decryption requests to the encapsulated cipher after applying
-	  ESSIV to the input IV. Note that in the aead case, it is assumed
+	  ESSIV to the input IV. Note that in the AEAD case, it is assumed
	  that the keys are presented in the same format used by the authenc
	  template, and that the IV appears at the end of the authenticated
	  associated data (AAD) region (which is how dm-crypt uses it.)

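As a quick illustration of the help text above (a hedged sketch only, not part of this patch set; the variable names key, keylen and err are hypothetical and error handling is trimmed), a kernel-side user could instantiate the template as an skcipher roughly like so, assuming CONFIG_CRYPTO_ESSIV and the wrapped algorithms are available:

	#include <crypto/skcipher.h>
	#include <linux/err.h>

	struct crypto_skcipher *tfm;
	int err;

	/* "cbc(aes)" is the encapsulated cipher; "sha256" hashes the key
	 * to derive the per-sector IV salt. */
	tfm = crypto_alloc_skcipher("essiv(cbc(aes),sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The skcipher instantiation takes the raw cbc(aes) key; only the
	 * AEAD flavour uses the authenc-style key format mentioned above. */
	err = crypto_skcipher_setkey(tfm, key, keylen);

This is roughly the algorithm name dm-crypt requests when a mapping specifies aes-cbc-essiv:sha256.
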
@@ -151,9 +151,9 @@ int crypto_register_acomp(struct acomp_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_register_acomp);

-int crypto_unregister_acomp(struct acomp_alg *alg)
+void crypto_unregister_acomp(struct acomp_alg *alg)
 {
-	return crypto_unregister_alg(&alg->base);
+	crypto_unregister_alg(&alg->base);
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
|
|
||||||
|
@@ -39,8 +39,6 @@
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 
-#include "internal.h"
-
 /*
 * Size of right-hand part of input data, in bytes; also the size of the block
 * cipher's block size and the hash function's output.
@@ -64,7 +62,7 @@
 
 struct adiantum_instance_ctx {
 struct crypto_skcipher_spawn streamcipher_spawn;
-struct crypto_spawn blockcipher_spawn;
+struct crypto_cipher_spawn blockcipher_spawn;
 struct crypto_shash_spawn hash_spawn;
 };
 
@@ -72,7 +70,7 @@ struct adiantum_tfm_ctx {
 struct crypto_skcipher *streamcipher;
 struct crypto_cipher *blockcipher;
 struct crypto_shash *hash;
-struct poly1305_key header_hash_key;
+struct poly1305_core_key header_hash_key;
 };
 
 struct adiantum_request_ctx {
@@ -135,9 +133,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
 crypto_skcipher_get_flags(tfm) &
 CRYPTO_TFM_REQ_MASK);
 err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
-crypto_skcipher_set_flags(tfm,
-crypto_skcipher_get_flags(tctx->streamcipher) &
-CRYPTO_TFM_RES_MASK);
 if (err)
 return err;
 
@@ -167,9 +162,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
 CRYPTO_TFM_REQ_MASK);
 err = crypto_cipher_setkey(tctx->blockcipher, keyp,
 BLOCKCIPHER_KEY_SIZE);
-crypto_skcipher_set_flags(tfm,
-crypto_cipher_get_flags(tctx->blockcipher) &
-CRYPTO_TFM_RES_MASK);
 if (err)
 goto out;
 keyp += BLOCKCIPHER_KEY_SIZE;
@@ -182,8 +174,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
 crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
 CRYPTO_TFM_REQ_MASK);
 err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
-crypto_skcipher_set_flags(tfm, crypto_shash_get_flags(tctx->hash) &
-CRYPTO_TFM_RES_MASK);
 keyp += NHPOLY1305_KEY_SIZE;
 WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
 out:
@@ -249,7 +239,7 @@ static void adiantum_hash_header(struct skcipher_request *req)
 poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
 TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
 
-poly1305_core_emit(&state, &rctx->header_hash);
+poly1305_core_emit(&state, NULL, &rctx->header_hash);
 }
 
 /* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
@@ -469,7 +459,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
 struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
 
 crypto_drop_skcipher(&ictx->streamcipher_spawn);
-crypto_drop_spawn(&ictx->blockcipher_spawn);
+crypto_drop_cipher(&ictx->blockcipher_spawn);
 crypto_drop_shash(&ictx->hash_spawn);
 kfree(inst);
 }
@@ -501,14 +491,12 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
 static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 struct crypto_attr_type *algt;
-const char *streamcipher_name;
-const char *blockcipher_name;
+u32 mask;
 const char *nhpoly1305_name;
 struct skcipher_instance *inst;
 struct adiantum_instance_ctx *ictx;
 struct skcipher_alg *streamcipher_alg;
 struct crypto_alg *blockcipher_alg;
-struct crypto_alg *_hash_alg;
 struct shash_alg *hash_alg;
 int err;
 
@@ -519,19 +507,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
 return -EINVAL;
 
-streamcipher_name = crypto_attr_alg_name(tb[1]);
-if (IS_ERR(streamcipher_name))
-return PTR_ERR(streamcipher_name);
-
-blockcipher_name = crypto_attr_alg_name(tb[2]);
-if (IS_ERR(blockcipher_name))
-return PTR_ERR(blockcipher_name);
-
-nhpoly1305_name = crypto_attr_alg_name(tb[3]);
-if (nhpoly1305_name == ERR_PTR(-ENOENT))
-nhpoly1305_name = "nhpoly1305";
-if (IS_ERR(nhpoly1305_name))
-return PTR_ERR(nhpoly1305_name);
+mask = crypto_requires_sync(algt->type, algt->mask);
 
 inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
 if (!inst)
@@ -539,37 +515,31 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 ictx = skcipher_instance_ctx(inst);
 
 /* Stream cipher, e.g. "xchacha12" */
-crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
-skcipher_crypto_instance(inst));
-err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
-0, crypto_requires_sync(algt->type,
-algt->mask));
+err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
+skcipher_crypto_instance(inst),
+crypto_attr_alg_name(tb[1]), 0, mask);
 if (err)
-goto out_free_inst;
+goto err_free_inst;
 streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
 
 /* Block cipher, e.g. "aes" */
-crypto_set_spawn(&ictx->blockcipher_spawn,
-skcipher_crypto_instance(inst));
-err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
-CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
+err = crypto_grab_cipher(&ictx->blockcipher_spawn,
+skcipher_crypto_instance(inst),
+crypto_attr_alg_name(tb[2]), 0, mask);
 if (err)
-goto out_drop_streamcipher;
-blockcipher_alg = ictx->blockcipher_spawn.alg;
+goto err_free_inst;
+blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);
 
 /* NHPoly1305 ε-∆U hash function */
-_hash_alg = crypto_alg_mod_lookup(nhpoly1305_name,
-CRYPTO_ALG_TYPE_SHASH,
-CRYPTO_ALG_TYPE_MASK);
-if (IS_ERR(_hash_alg)) {
-err = PTR_ERR(_hash_alg);
-goto out_drop_blockcipher;
-}
-hash_alg = __crypto_shash_alg(_hash_alg);
-err = crypto_init_shash_spawn(&ictx->hash_spawn, hash_alg,
-skcipher_crypto_instance(inst));
+nhpoly1305_name = crypto_attr_alg_name(tb[3]);
+if (nhpoly1305_name == ERR_PTR(-ENOENT))
+nhpoly1305_name = "nhpoly1305";
+err = crypto_grab_shash(&ictx->hash_spawn,
+skcipher_crypto_instance(inst),
+nhpoly1305_name, 0, mask);
 if (err)
-goto out_put_hash;
+goto err_free_inst;
+hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);
 
 /* Check the set of algorithms */
 if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
@@ -578,7 +548,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 streamcipher_alg->base.cra_name,
 blockcipher_alg->cra_name, hash_alg->base.cra_name);
 err = -EINVAL;
-goto out_drop_hash;
+goto err_free_inst;
 }
 
 /* Instance fields */
@@ -587,13 +557,13 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
 blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
-goto out_drop_hash;
+goto err_free_inst;
 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 "adiantum(%s,%s,%s)",
 streamcipher_alg->base.cra_driver_name,
 blockcipher_alg->cra_driver_name,
 hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
-goto out_drop_hash;
+goto err_free_inst;
 
 inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags &
 CRYPTO_ALG_ASYNC;
@@ -623,22 +593,10 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 inst->free = adiantum_free_instance;
 
 err = skcipher_register_instance(tmpl, inst);
-if (err)
-goto out_drop_hash;
-
-crypto_mod_put(_hash_alg);
-return 0;
-
-out_drop_hash:
-crypto_drop_shash(&ictx->hash_spawn);
-out_put_hash:
-crypto_mod_put(_hash_alg);
-out_drop_blockcipher:
-crypto_drop_spawn(&ictx->blockcipher_spawn);
-out_drop_streamcipher:
-crypto_drop_skcipher(&ictx->streamcipher_spawn);
-out_free_inst:
-kfree(inst);
+if (err) {
+err_free_inst:
+adiantum_free_instance(inst);
+}
 return err;
 }
 
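The reworked adiantum template still produces the same algorithm names as before ("adiantum(%s,%s)" above). A quick usage sketch, not part of the patch: the default instantiation used for disk and file encryption can be requested as shown below, where the omitted third template argument falls back to "nhpoly1305" exactly as the tb[3] handling implements.

#include <linux/err.h>
#include <crypto/skcipher.h>

/* Sketch only: request the default Adiantum instantiation by name. */
static struct crypto_skcipher *adiantum_alloc_example(void)
{
	return crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
}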
@@ -185,11 +185,6 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
 {
 struct aead_instance *aead = aead_instance(inst);
 
-if (!aead->free) {
-inst->tmpl->free(inst);
-return;
-}
-
 aead->free(aead);
 }
 
@@ -207,11 +202,12 @@ static const struct crypto_type crypto_aead_type = {
 .tfmsize = offsetof(struct crypto_aead, base),
 };
 
-int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
-u32 type, u32 mask)
+int crypto_grab_aead(struct crypto_aead_spawn *spawn,
+struct crypto_instance *inst,
+const char *name, u32 type, u32 mask)
 {
 spawn->base.frontend = &crypto_aead_type;
-return crypto_grab_spawn(&spawn->base, name, type, mask);
+return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
 }
 EXPORT_SYMBOL_GPL(crypto_grab_aead);
 
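The grab helpers now take the owning instance up front, so a template's ->create() callback no longer needs a separate crypto_set_aead_spawn() step and may feed crypto_attr_alg_name() straight in (the extended crypto_grab_spawn() accepts an ERR_PTR name, see the algapi.c hunk further down). A rough caller-side sketch; all example_* names are placeholders and real code must still drop the spawn or free the instance on later errors:

#include <linux/slab.h>
#include <crypto/internal/aead.h>

struct example_inst_ctx {
	struct crypto_aead_spawn spawn;
};

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct aead_instance *inst;
	struct example_inst_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	ctx = aead_instance_ctx(inst);

	/* Instance and algorithm name are now passed in one call. */
	err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), 0, 0);
	if (err) {
		kfree(inst);	/* spawn was never grabbed, plain free is enough */
		return err;
	}

	/* ... fill in inst->alg and call aead_register_instance(tmpl, inst) ... */
	return 0;
}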
@@ -292,6 +288,9 @@ int aead_register_instance(struct crypto_template *tmpl,
 {
 int err;
 
+if (WARN_ON(!inst->free))
+return -EINVAL;
+
 err = aead_prepare_alg(&inst->alg);
 if (err)
 return err;
@@ -372,10 +372,8 @@ static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key,
 {
 struct aegis_ctx *ctx = crypto_aead_ctx(aead);
 
-if (keylen != AEGIS128_KEY_SIZE) {
-crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+if (keylen != AEGIS128_KEY_SIZE)
 return -EINVAL;
-}
 
 memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE);
 return 0;
@@ -1127,24 +1127,18 @@ EXPORT_SYMBOL_GPL(crypto_it_tab);
 * @in_key: The input key.
 * @key_len: The size of the key.
 *
-* Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm
-* is set. The function uses aes_expand_key() to expand the key.
-* &crypto_aes_ctx _must_ be the private data embedded in @tfm which is
-* retrieved with crypto_tfm_ctx().
+* This function uses aes_expand_key() to expand the key. &crypto_aes_ctx
+* _must_ be the private data embedded in @tfm which is retrieved with
+* crypto_tfm_ctx().
+*
+* Return: 0 on success; -EINVAL on failure (only happens for bad key lengths)
 */
 int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 unsigned int key_len)
 {
 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-u32 *flags = &tfm->crt_flags;
-int ret;
-
-ret = aes_expandkey(ctx, in_key, key_len);
-if (!ret)
-return 0;
-
-*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-return -EINVAL;
+return aes_expandkey(ctx, in_key, key_len);
 }
 EXPORT_SYMBOL_GPL(crypto_aes_set_key);
 
@@ -134,11 +134,13 @@ void af_alg_release_parent(struct sock *sk)
 sk = ask->parent;
 ask = alg_sk(sk);
 
-lock_sock(sk);
+local_bh_disable();
+bh_lock_sock(sk);
 ask->nokey_refcnt -= nokey;
 if (!last)
 last = !--ask->refcnt;
-release_sock(sk);
+bh_unlock_sock(sk);
+local_bh_enable();
 
 if (last)
 sock_put(sk);
@@ -23,6 +23,8 @@
 
 #include "internal.h"
 
+static const struct crypto_type crypto_ahash_type;
+
 struct ahash_request_priv {
 crypto_completion_t complete;
 void *data;
@@ -509,6 +511,13 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
 return crypto_alg_extsize(alg);
 }
 
+static void crypto_ahash_free_instance(struct crypto_instance *inst)
+{
+struct ahash_instance *ahash = ahash_instance(inst);
+
+ahash->free(ahash);
+}
+
 #ifdef CONFIG_NET
 static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
 {
@@ -542,9 +551,10 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 __crypto_hash_alg_common(alg)->digestsize);
 }
 
-const struct crypto_type crypto_ahash_type = {
+static const struct crypto_type crypto_ahash_type = {
 .extsize = crypto_ahash_extsize,
 .init_tfm = crypto_ahash_init_tfm,
+.free = crypto_ahash_free_instance,
 #ifdef CONFIG_PROC_FS
 .show = crypto_ahash_show,
 #endif
@@ -554,7 +564,15 @@ const struct crypto_type crypto_ahash_type = {
 .type = CRYPTO_ALG_TYPE_AHASH,
 .tfmsize = offsetof(struct crypto_ahash, base),
 };
-EXPORT_SYMBOL_GPL(crypto_ahash_type);
+
+int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
+struct crypto_instance *inst,
+const char *name, u32 type, u32 mask)
+{
+spawn->base.frontend = &crypto_ahash_type;
+return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_ahash);
 
 struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
 u32 mask)
@@ -598,9 +616,9 @@ int crypto_register_ahash(struct ahash_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_register_ahash);
 
-int crypto_unregister_ahash(struct ahash_alg *alg)
+void crypto_unregister_ahash(struct ahash_alg *alg)
 {
-return crypto_unregister_alg(&alg->halg.base);
+crypto_unregister_alg(&alg->halg.base);
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
 
@@ -638,6 +656,9 @@ int ahash_register_instance(struct crypto_template *tmpl,
 {
 int err;
 
+if (WARN_ON(!inst->free))
+return -EINVAL;
+
 err = ahash_prepare_alg(&inst->alg);
 if (err)
 return err;
@@ -646,31 +667,6 @@ int ahash_register_instance(struct crypto_template *tmpl,
 }
 EXPORT_SYMBOL_GPL(ahash_register_instance);
 
-void ahash_free_instance(struct crypto_instance *inst)
-{
-crypto_drop_spawn(crypto_instance_ctx(inst));
-kfree(ahash_instance(inst));
-}
-EXPORT_SYMBOL_GPL(ahash_free_instance);
-
-int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
-struct hash_alg_common *alg,
-struct crypto_instance *inst)
-{
-return crypto_init_spawn2(&spawn->base, &alg->base, inst,
-&crypto_ahash_type);
-}
-EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
-
-struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
-{
-struct crypto_alg *alg;
-
-alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
-return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
-}
-EXPORT_SYMBOL_GPL(ahash_attr_alg);
-
 bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
 {
 struct crypto_alg *alg = &halg->base;
@@ -90,11 +90,12 @@ static const struct crypto_type crypto_akcipher_type = {
 .tfmsize = offsetof(struct crypto_akcipher, base),
 };
 
-int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
-u32 type, u32 mask)
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
+struct crypto_instance *inst,
+const char *name, u32 type, u32 mask)
 {
 spawn->base.frontend = &crypto_akcipher_type;
-return crypto_grab_spawn(&spawn->base, name, type, mask);
+return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
 }
 EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
 
@@ -146,6 +147,8 @@ EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
 int akcipher_register_instance(struct crypto_template *tmpl,
 struct akcipher_instance *inst)
 {
+if (WARN_ON(!inst->free))
+return -EINVAL;
 akcipher_prepare_alg(&inst->alg);
 return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
 }
crypto/algapi.c (250 changed lines)
@@ -65,11 +65,6 @@ static int crypto_check_alg(struct crypto_alg *alg)
 
 static void crypto_free_instance(struct crypto_instance *inst)
 {
-if (!inst->alg.cra_type->free) {
-inst->tmpl->free(inst);
-return;
-}
-
 inst->alg.cra_type->free(inst);
 }
 
@@ -82,6 +77,15 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
 crypto_tmpl_put(tmpl);
 }
 
+/*
+* This function adds a spawn to the list secondary_spawns which
+* will be used at the end of crypto_remove_spawns to unregister
+* instances, unless the spawn happens to be one that is depended
+* on by the new algorithm (nalg in crypto_remove_spawns).
+*
+* This function is also responsible for resurrecting any algorithms
+* in the dependency chain of nalg by unsetting n->dead.
+*/
 static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
 struct list_head *stack,
 struct list_head *top,
@@ -93,15 +97,17 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
 if (!spawn)
 return NULL;
 
-n = list_next_entry(spawn, list);
+n = list_prev_entry(spawn, list);
 
-if (spawn->alg && &n->list != stack && !n->alg)
-n->alg = (n->list.next == stack) ? alg :
-&list_next_entry(n, list)->inst->alg;
-
 list_move(&spawn->list, secondary_spawns);
 
-return &n->list == stack ? top : &n->inst->alg.cra_users;
+if (list_is_last(&n->list, stack))
+return top;
+
+n = list_next_entry(n, list);
+if (!spawn->dead)
+n->dead = false;
+
+return &n->inst->alg.cra_users;
 }
 
 static void crypto_remove_instance(struct crypto_instance *inst,
@@ -113,8 +119,6 @@ static void crypto_remove_instance(struct crypto_instance *inst,
 return;
 
 inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
-if (hlist_unhashed(&inst->list))
-return;
 
 if (!tmpl || !crypto_tmpl_get(tmpl))
 return;
@@ -126,6 +130,12 @@ static void crypto_remove_instance(struct crypto_instance *inst,
 BUG_ON(!list_empty(&inst->alg.cra_users));
 }
 
+/*
+* Given an algorithm alg, remove all algorithms that depend on it
+* through spawns. If nalg is not null, then exempt any algorithms
+* that is depended on by nalg. This is useful when nalg itself
+* depends on alg.
+*/
 void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 struct crypto_alg *nalg)
 {
@@ -144,6 +154,11 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 list_move(&spawn->list, &top);
 }
 
+/*
+* Perform a depth-first walk starting from alg through
+* the cra_users tree. The list stack records the path
+* from alg to the current spawn.
+*/
 spawns = &top;
 do {
 while (!list_empty(spawns)) {
@@ -153,17 +168,26 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 list);
 inst = spawn->inst;
 
-BUG_ON(&inst->alg == alg);
-
 list_move(&spawn->list, &stack);
+spawn->dead = !spawn->registered || &inst->alg != nalg;
+
+if (!spawn->registered)
+break;
+
+BUG_ON(&inst->alg == alg);
 
 if (&inst->alg == nalg)
 break;
 
-spawn->alg = NULL;
 spawns = &inst->alg.cra_users;
 
 /*
+* Even if spawn->registered is true, the
+* instance itself may still be unregistered.
+* This is because it may have failed during
+* registration. Therefore we still need to
+* make the following test.
+*
 * We may encounter an unregistered instance here, since
 * an instance's spawns are set up prior to the instance
 * being registered. An unregistered instance will have
@@ -178,10 +202,15 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 } while ((spawns = crypto_more_spawns(alg, &stack, &top,
 &secondary_spawns)));
 
+/*
+* Remove all instances that are marked as dead. Also
+* complete the resurrection of the others by moving them
+* back to the cra_users list.
+*/
 list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
-if (spawn->alg)
+if (!spawn->dead)
 list_move(&spawn->list, &spawn->alg->cra_users);
-else
+else if (spawn->registered)
 crypto_remove_instance(spawn->inst, list);
 }
 }
@@ -257,6 +286,7 @@ void crypto_alg_tested(const char *name, int err)
 struct crypto_alg *alg;
 struct crypto_alg *q;
 LIST_HEAD(list);
+bool best;
 
 down_write(&crypto_alg_sem);
 list_for_each_entry(q, &crypto_alg_list, cra_list) {
@@ -280,6 +310,21 @@ found:
 
 alg->cra_flags |= CRYPTO_ALG_TESTED;
 
+/* Only satisfy larval waiters if we are the best. */
+best = true;
+list_for_each_entry(q, &crypto_alg_list, cra_list) {
+if (crypto_is_moribund(q) || !crypto_is_larval(q))
+continue;
+
+if (strcmp(alg->cra_name, q->cra_name))
+continue;
+
+if (q->cra_priority > alg->cra_priority) {
+best = false;
+break;
+}
+}
+
 list_for_each_entry(q, &crypto_alg_list, cra_list) {
 if (q == alg)
 continue;
@@ -303,10 +348,12 @@ found:
 continue;
 if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
 continue;
-if (!crypto_mod_get(alg))
-continue;
 
+if (best && crypto_mod_get(alg))
 larval->adult = alg;
+else
+larval->adult = ERR_PTR(-EAGAIN);
+
 continue;
 }
 
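To picture what the new "best" check changes, consider two implementations of one algorithm being registered close together (all names and priorities below are invented for illustration). The added loop scans for still-pending registrations (larvals) of the same cra_name; if one of them carries a higher cra_priority, waiters blocked on the name now receive the ERR_PTR(-EAGAIN) above and look the algorithm up again later, rather than being bound to the lower-priority candidate.

#include <linux/crypto.h>

/*
 * Illustration only; these structs are not registrable as-is, a real
 * algorithm also needs its operations, block size and context size set.
 */
static struct crypto_alg sha256_lowprio_example = {
	.cra_name		= "sha256",
	.cra_driver_name	= "sha256-example-generic",
	.cra_priority		= 100,
};

static struct crypto_alg sha256_highprio_example = {
	.cra_name		= "sha256",
	.cra_driver_name	= "sha256-example-accel",
	.cra_priority		= 300,
};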
@@ -397,7 +444,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
 return 0;
 }
 
-int crypto_unregister_alg(struct crypto_alg *alg)
+void crypto_unregister_alg(struct crypto_alg *alg)
 {
 int ret;
 LIST_HEAD(list);
@@ -406,15 +453,14 @@ int crypto_unregister_alg(struct crypto_alg *alg)
 ret = crypto_remove_alg(alg, &list);
 up_write(&crypto_alg_sem);
 
-if (ret)
-return ret;
+if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
+return;
 
 BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
 if (alg->cra_destroy)
 alg->cra_destroy(alg);
 
 crypto_remove_final(&list);
-return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_alg);
 
@@ -438,18 +484,12 @@ err:
 }
 EXPORT_SYMBOL_GPL(crypto_register_algs);
 
-int crypto_unregister_algs(struct crypto_alg *algs, int count)
+void crypto_unregister_algs(struct crypto_alg *algs, int count)
 {
-int i, ret;
+int i;
 
-for (i = 0; i < count; i++) {
-ret = crypto_unregister_alg(&algs[i]);
-if (ret)
-pr_err("Failed to unregister %s %s: %d\n",
-algs[i].cra_driver_name, algs[i].cra_name, ret);
-}
-
-return 0;
+for (i = 0; i < count; i++)
+crypto_unregister_alg(&algs[i]);
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_algs);
 
@@ -561,6 +601,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
 struct crypto_instance *inst)
 {
 struct crypto_larval *larval;
+struct crypto_spawn *spawn;
 int err;
 
 err = crypto_check_alg(&inst->alg);
@@ -572,6 +613,22 @@ int crypto_register_instance(struct crypto_template *tmpl,
 
 down_write(&crypto_alg_sem);
 
+larval = ERR_PTR(-EAGAIN);
+for (spawn = inst->spawns; spawn;) {
+struct crypto_spawn *next;
+
+if (spawn->dead)
+goto unlock;
+
+next = spawn->next;
+spawn->inst = inst;
+spawn->registered = true;
+
+crypto_mod_put(spawn->alg);
+
+spawn = next;
+}
+
 larval = __crypto_register_alg(&inst->alg);
 if (IS_ERR(larval))
 goto unlock;
@@ -594,7 +651,7 @@ err:
 }
 EXPORT_SYMBOL_GPL(crypto_register_instance);
 
-int crypto_unregister_instance(struct crypto_instance *inst)
+void crypto_unregister_instance(struct crypto_instance *inst)
 {
 LIST_HEAD(list);
 
@@ -606,62 +663,37 @@ int crypto_unregister_instance(struct crypto_instance *inst)
 up_write(&crypto_alg_sem);
 
 crypto_remove_final(&list);
-
-return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_instance);
 
-int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
-struct crypto_instance *inst, u32 mask)
+int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
+const char *name, u32 type, u32 mask)
 {
+struct crypto_alg *alg;
 int err = -EAGAIN;
 
 if (WARN_ON_ONCE(inst == NULL))
 return -EINVAL;
 
-spawn->inst = inst;
-spawn->mask = mask;
-
-down_write(&crypto_alg_sem);
-if (!crypto_is_moribund(alg)) {
-list_add(&spawn->list, &alg->cra_users);
-spawn->alg = alg;
-err = 0;
-}
-up_write(&crypto_alg_sem);
-
-return err;
-}
-EXPORT_SYMBOL_GPL(crypto_init_spawn);
-
-int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
-struct crypto_instance *inst,
-const struct crypto_type *frontend)
-{
-int err = -EINVAL;
-
-if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
-goto out;
-
-spawn->frontend = frontend;
-err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
-
-out:
-return err;
-}
-EXPORT_SYMBOL_GPL(crypto_init_spawn2);
-
-int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
-u32 type, u32 mask)
-{
-struct crypto_alg *alg;
-int err;
+/* Allow the result of crypto_attr_alg_name() to be passed directly */
+if (IS_ERR(name))
+return PTR_ERR(name);
 
 alg = crypto_find_alg(name, spawn->frontend, type, mask);
 if (IS_ERR(alg))
 return PTR_ERR(alg);
 
-err = crypto_init_spawn(spawn, alg, spawn->inst, mask);
+down_write(&crypto_alg_sem);
+if (!crypto_is_moribund(alg)) {
+list_add(&spawn->list, &alg->cra_users);
+spawn->alg = alg;
+spawn->mask = mask;
+spawn->next = inst->spawns;
+inst->spawns = spawn;
+err = 0;
+}
+up_write(&crypto_alg_sem);
+
+if (err)
 crypto_mod_put(alg);
 return err;
 }
@@ -669,34 +701,32 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn);
 
 void crypto_drop_spawn(struct crypto_spawn *spawn)
 {
-if (!spawn->alg)
+if (!spawn->alg) /* not yet initialized? */
 return;
 
 down_write(&crypto_alg_sem);
+if (!spawn->dead)
 list_del(&spawn->list);
 up_write(&crypto_alg_sem);
 
+if (!spawn->registered)
+crypto_mod_put(spawn->alg);
 }
 EXPORT_SYMBOL_GPL(crypto_drop_spawn);
 
 static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
 {
 struct crypto_alg *alg;
-struct crypto_alg *alg2;
 
 down_read(&crypto_alg_sem);
 alg = spawn->alg;
-alg2 = alg;
-if (alg2)
-alg2 = crypto_mod_get(alg2);
+if (!spawn->dead && !crypto_mod_get(alg)) {
+alg->cra_flags |= CRYPTO_ALG_DYING;
+alg = NULL;
+}
 up_read(&crypto_alg_sem);
 
-if (!alg2) {
-if (alg)
-crypto_shoot_alg(alg);
-return ERR_PTR(-EAGAIN);
-}
-
-return alg;
+return alg ?: ERR_PTR(-EAGAIN);
 }
 
 struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
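Two properties of the reworked spawn code are worth calling out for template writers: crypto_drop_spawn() is now an explicit no-op on a spawn that was never grabbed (spawn->alg still NULL, the "not yet initialized?" case above), and every grabbed spawn is linked to its owning instance. Together they allow one free helper to be used from any point of a ->create() function, which is the err_free_inst pattern adopted in the adiantum hunks earlier. A minimal sketch with placeholder names, not taken from the patch:

#include <linux/slab.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>

struct example_spawns_ctx {
	struct crypto_skcipher_spawn skcipher_spawn;
	struct crypto_shash_spawn hash_spawn;
};

static void example_free_instance(struct skcipher_instance *inst)
{
	struct example_spawns_ctx *ctx = skcipher_instance_ctx(inst);

	/* Both drops are safe even if the corresponding grab never ran. */
	crypto_drop_skcipher(&ctx->skcipher_spawn);
	crypto_drop_shash(&ctx->hash_spawn);
	kfree(inst);
}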
@@ -809,20 +839,6 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
 }
 EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
 
-struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
-const struct crypto_type *frontend,
-u32 type, u32 mask)
-{
-const char *name;
-
-name = crypto_attr_alg_name(rta);
-if (IS_ERR(name))
-return ERR_CAST(name);
-
-return crypto_find_alg(name, frontend, type, mask);
-}
-EXPORT_SYMBOL_GPL(crypto_attr_alg2);
-
 int crypto_attr_u32(struct rtattr *rta, u32 *num)
 {
 struct crypto_attr_u32 *nu32;
@@ -856,32 +872,6 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name,
 }
 EXPORT_SYMBOL_GPL(crypto_inst_setname);
 
-void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
-unsigned int head)
-{
-struct crypto_instance *inst;
-char *p;
-int err;
-
-p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
-GFP_KERNEL);
-if (!p)
-return ERR_PTR(-ENOMEM);
-
-inst = (void *)(p + head);
-
-err = crypto_inst_setname(inst, name, alg);
-if (err)
-goto err_free_inst;
-
-return p;
-
-err_free_inst:
-kfree(p);
-return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(crypto_alloc_instance);
-
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
 {
 INIT_LIST_HEAD(&queue->list);