// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/completion.h>
#include "internal.h"

LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);

BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
EXPORT_SYMBOL_GPL(__crypto_boot_test_finished);
#endif

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
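
/*
 * crypto_mod_get() pins both the algorithm's refcount and the module
 * that implements it, so the module cannot be unloaded while the
 * reference is held.  crypto_mod_put() releases both, dropping the
 * algorithm reference before the module reference.
 */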
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
        return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);

void crypto_mod_put(struct crypto_alg *alg)
{
        struct module *module = alg->cra_module;

        crypto_alg_put(alg);
        module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);
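
/*
 * Find the best algorithm matching @name.  An exact cra_driver_name
 * match always wins; otherwise the highest-priority cra_name match is
 * taken.  Must be called with crypto_alg_sem held.
 */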
static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
                                              u32 mask)
{
        struct crypto_alg *q, *alg = NULL;
        int best = -2;

        list_for_each_entry(q, &crypto_alg_list, cra_list) {
                int exact, fuzzy;

                if (crypto_is_moribund(q))
                        continue;

                if ((q->cra_flags ^ type) & mask)
                        continue;

                if (crypto_is_larval(q) &&
                    !crypto_is_test_larval((struct crypto_larval *)q) &&
                    ((struct crypto_larval *)q)->mask != mask)
                        continue;

                exact = !strcmp(q->cra_driver_name, name);
                fuzzy = !strcmp(q->cra_name, name);
                if (!exact && !(fuzzy && q->cra_priority > best))
                        continue;

                if (unlikely(!crypto_mod_get(q)))
                        continue;

                best = q->cra_priority;
                if (alg)
                        crypto_mod_put(alg);
                alg = q;

                if (exact)
                        break;
        }

        return alg;
}

static void crypto_larval_destroy(struct crypto_alg *alg)
{
        struct crypto_larval *larval = (void *)alg;

        BUG_ON(!crypto_is_larval(alg));
        if (!IS_ERR_OR_NULL(larval->adult))
                crypto_mod_put(larval->adult);
        kfree(larval);
}
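
/*
 * A larval is a temporary stand-in registered under @name while the
 * real ("adult") algorithm is being instantiated or tested.  Waiters
 * sleep on ->completion until the adult materialises or the larval is
 * killed.
 */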
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
{
        struct crypto_larval *larval;

        larval = kzalloc(sizeof(*larval), GFP_KERNEL);
        if (!larval)
                return ERR_PTR(-ENOMEM);

        larval->mask = mask;
        larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
        larval->alg.cra_priority = -1;
        larval->alg.cra_destroy = crypto_larval_destroy;

        strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
        init_completion(&larval->completion);

        return larval;
}
EXPORT_SYMBOL_GPL(crypto_larval_alloc);
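
/*
 * Register a larval for @name unless an algorithm or another larval is
 * already listed; in that case the freshly allocated larval is freed
 * and the existing entry is used (waiting on it if it is a larval).
 * The initial refcount of 2 covers the list reference plus the one
 * handed back to the caller.
 */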
static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
                                            u32 mask)
{
        struct crypto_alg *alg;
        struct crypto_larval *larval;

        larval = crypto_larval_alloc(name, type, mask);
        if (IS_ERR(larval))
                return ERR_CAST(larval);

        refcount_set(&larval->alg.cra_refcnt, 2);

        down_write(&crypto_alg_sem);
        alg = __crypto_alg_lookup(name, type, mask);
        if (!alg) {
                alg = &larval->alg;
                list_add(&alg->cra_list, &crypto_alg_list);
        }
        up_write(&crypto_alg_sem);

        if (alg != &larval->alg) {
                kfree(larval);
                if (crypto_is_larval(alg))
                        alg = crypto_larval_wait(alg);
        }

        return alg;
}

void crypto_larval_kill(struct crypto_alg *alg)
{
        struct crypto_larval *larval = (void *)alg;

        down_write(&crypto_alg_sem);
        list_del(&alg->cra_list);
        up_write(&crypto_alg_sem);
        complete_all(&larval->completion);
        crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);

void crypto_wait_for_test(struct crypto_larval *larval)
{
        int err;

        err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
        if (WARN_ON_ONCE(err != NOTIFY_STOP))
                goto out;

        err = wait_for_completion_killable(&larval->completion);
        WARN_ON(err);
out:
        crypto_larval_kill(&larval->alg);
}
EXPORT_SYMBOL_GPL(crypto_wait_for_test);
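
/*
 * Kick off self-testing for a test larval exactly once.  The unlocked
 * ->test_started check is an optimisation; it is repeated under
 * crypto_alg_sem to close the race between concurrent waiters.
 */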
static void crypto_start_test(struct crypto_larval *larval)
{
        if (!crypto_is_test_larval(larval))
                return;

        if (larval->test_started)
                return;

        down_write(&crypto_alg_sem);
        if (larval->test_started) {
                up_write(&crypto_alg_sem);
                return;
        }

        larval->test_started = true;
        up_write(&crypto_alg_sem);

        crypto_wait_for_test(larval);
}
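
/*
 * Wait for a larval to mature into a usable algorithm.  The wait is
 * killable and bounded at 60 seconds; on failure the adult pointer is
 * replaced by an ERR_PTR (-EINTR, -ETIMEDOUT, -ENOENT or -EAGAIN).
 */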
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
        struct crypto_larval *larval = (void *)alg;
        long timeout;

        if (!crypto_boot_test_finished())
                crypto_start_test(larval);

        timeout = wait_for_completion_killable_timeout(
                &larval->completion, 60 * HZ);

        alg = larval->adult;
        if (timeout < 0)
                alg = ERR_PTR(-EINTR);
        else if (!timeout)
                alg = ERR_PTR(-ETIMEDOUT);
        else if (!alg)
                alg = ERR_PTR(-ENOENT);
        else if (IS_ERR(alg))
                ;
        else if (crypto_is_test_larval(larval) &&
                 !(alg->cra_flags & CRYPTO_ALG_TESTED))
                alg = ERR_PTR(-EAGAIN);
        else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)
                alg = ERR_PTR(-EAGAIN);
        else if (!crypto_mod_get(alg))
                alg = ERR_PTR(-EAGAIN);
        crypto_mod_put(&larval->alg);

        return alg;
}
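
/*
 * Look up @name, transparently handling the CRYPTO_ALG_TESTED and
 * CRYPTO_ALG_FIPS_INTERNAL bits.  FIPS-internal algorithms may only be
 * used as template arguments; a direct lookup of one fails with
 * -ENOENT, while an algorithm that failed its self-test yields
 * -ELIBBAD.
 */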
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
                                            u32 mask)
{
        const u32 fips = CRYPTO_ALG_FIPS_INTERNAL;
        struct crypto_alg *alg;
        u32 test = 0;

        if (!((type | mask) & CRYPTO_ALG_TESTED))
                test |= CRYPTO_ALG_TESTED;

        down_read(&crypto_alg_sem);
        alg = __crypto_alg_lookup(name, (type | test) & ~fips,
                                  (mask | test) & ~fips);
        if (alg) {
                if (((type | mask) ^ fips) & fips)
                        mask |= fips;
                mask &= fips;

                if (!crypto_is_larval(alg) &&
                    ((type ^ alg->cra_flags) & mask)) {
                        /* Algorithm is disallowed in FIPS mode. */
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ENOENT);
                }
        } else if (test) {
                alg = __crypto_alg_lookup(name, type, mask);
                if (alg && !crypto_is_larval(alg)) {
                        /* Test failed */
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ELIBBAD);
                }
        }
        up_read(&crypto_alg_sem);

        return alg;
}
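
/*
 * Look up @name and, if it is not found, try to autoload it via the
 * "crypto-<name>" (and possibly "crypto-<name>-all") module aliases
 * before registering a larval so a crypto manager can construct it.
 */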
static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
                                               u32 mask)
{
        struct crypto_alg *alg;

        if (!name)
                return ERR_PTR(-ENOENT);

        type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
        mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);

        alg = crypto_alg_lookup(name, type, mask);
        if (!alg && !(mask & CRYPTO_NOLOAD)) {
                request_module("crypto-%s", name);

                if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
                      CRYPTO_ALG_NEED_FALLBACK))
                        request_module("crypto-%s-all", name);

                alg = crypto_alg_lookup(name, type, mask);
        }

        if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
                alg = crypto_larval_wait(alg);
        else if (!alg)
                alg = crypto_larval_add(name, type, mask);

        return alg;
}
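
/*
 * Run @val through the crypto notifier chain; if nobody handles it,
 * load the "cryptomgr" module (which registers a handler) and retry.
 */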
int crypto_probing_notify(unsigned long val, void *v)
{
        int ok;

        ok = blocking_notifier_call_chain(&crypto_chain, val, v);
        if (ok == NOTIFY_DONE) {
                request_module("cryptomgr");
                ok = blocking_notifier_call_chain(&crypto_chain, val, v);
        }

        return ok;
}
EXPORT_SYMBOL_GPL(crypto_probing_notify);

struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;
        struct crypto_alg *larval;
        int ok;

        /*
         * If the internal flag is set for a cipher, require a caller to
         * invoke the cipher with the internal flag to use that cipher.
         * Also, if a caller wants to allocate a cipher that may or may
         * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
         * !(mask & CRYPTO_ALG_INTERNAL).
         */
        if (!((type | mask) & CRYPTO_ALG_INTERNAL))
                mask |= CRYPTO_ALG_INTERNAL;

        larval = crypto_larval_lookup(name, type, mask);
        if (IS_ERR(larval) || !crypto_is_larval(larval))
                return larval;

        ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);

        if (ok == NOTIFY_STOP)
                alg = crypto_larval_wait(larval);
        else {
                crypto_mod_put(larval);
                alg = ERR_PTR(-ENOENT);
        }
        crypto_larval_kill(larval);
        return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);

static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
        const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;

        if (type_obj)
                return type_obj->init(tfm, type, mask);
        return 0;
}

static void crypto_exit_ops(struct crypto_tfm *tfm)
{
        const struct crypto_type *type = tfm->__crt_alg->cra_type;

        if (type && tfm->exit)
                tfm->exit(tfm);
}
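
/*
 * Size of the private context that follows struct crypto_tfm: alignment
 * padding derived from cra_alignmask plus the size reported by the
 * frontend type, or a legacy per-type size for cipher/compress.
 */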
static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
        const struct crypto_type *type_obj = alg->cra_type;
        unsigned int len;

        len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        if (type_obj)
                return len + type_obj->ctxsize(alg, type, mask);

        switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        default:
                BUG();

        case CRYPTO_ALG_TYPE_CIPHER:
                len += crypto_cipher_ctxsize(alg);
                break;

        case CRYPTO_ALG_TYPE_COMPRESS:
                len += crypto_compress_ctxsize(alg);
                break;
        }

        return len;
}
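
/*
 * Mark an algorithm as dying, typically after its ->cra_init() failed
 * with -EAGAIN, so that it is skipped by subsequent lookups and torn
 * down once the last reference goes away.
 */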
void crypto_shoot_alg(struct crypto_alg *alg)
{
        down_write(&crypto_alg_sem);
        alg->cra_flags |= CRYPTO_ALG_DYING;
        up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
                                      u32 mask)
{
        struct crypto_tfm *tfm = NULL;
        unsigned int tfm_size;
        int err = -ENOMEM;

        tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
        tfm = kzalloc(tfm_size, GFP_KERNEL);
        if (tfm == NULL)
                goto out_err;

        tfm->__crt_alg = alg;

        err = crypto_init_ops(tfm, type, mask);
        if (err)
                goto out_free_tfm;

        if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
                goto cra_init_failed;

        goto out;

cra_init_failed:
        crypto_exit_ops(tfm);
out_free_tfm:
        if (err == -EAGAIN)
                crypto_shoot_alg(alg);
        kfree(tfm);
out_err:
        tfm = ERR_PTR(err);
out:
        return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);

/*
 * crypto_alloc_base - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 *
 * This function should not be used by new algorithm types.
 * Please use crypto_alloc_tfm instead.
 *
 * crypto_alloc_base() will first attempt to locate an already loaded
 * algorithm.  If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias.  If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly.  A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of a non-determinate type.  Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_skcipher().
 *
 * In case of error the return value is an error pointer.
 */
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
        struct crypto_tfm *tfm;
        int err;

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_alg_mod_lookup(alg_name, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = __crypto_alloc_tfm(alg, type, mask);
                if (!IS_ERR(tfm))
                        return tfm;

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
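
/*
 * Illustrative sketch (not part of this file): allocating and freeing a
 * transform by name with crypto_alloc_base().  "aes" is just an example
 * algorithm name; real users should prefer a typed allocator such as
 * crypto_alloc_skcipher().
 *
 *	struct crypto_tfm *tfm = crypto_alloc_base("aes", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */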

void *crypto_create_tfm_node(struct crypto_alg *alg,
                             const struct crypto_type *frontend,
                             int node)
{
        char *mem;
        struct crypto_tfm *tfm = NULL;
        unsigned int tfmsize;
        unsigned int total;
        int err = -ENOMEM;

        tfmsize = frontend->tfmsize;
        total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);

        mem = kzalloc_node(total, GFP_KERNEL, node);
        if (mem == NULL)
                goto out_err;

        tfm = (struct crypto_tfm *)(mem + tfmsize);
        tfm->__crt_alg = alg;
        tfm->node = node;

        err = frontend->init_tfm(tfm);
        if (err)
                goto out_free_tfm;

        if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
                goto cra_init_failed;

        goto out;

cra_init_failed:
        crypto_exit_ops(tfm);
out_free_tfm:
        if (err == -EAGAIN)
                crypto_shoot_alg(alg);
        kfree(mem);
out_err:
        mem = ERR_PTR(err);
out:
        return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);

struct crypto_alg *crypto_find_alg(const char *alg_name,
                                   const struct crypto_type *frontend,
                                   u32 type, u32 mask)
{
        if (frontend) {
                type &= frontend->maskclear;
                mask &= frontend->maskclear;
                type |= frontend->type;
                mask |= frontend->maskset;
        }

        return crypto_alg_mod_lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);

/*
 * crypto_alloc_tfm_node - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @frontend: Frontend algorithm type
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 * @node: NUMA node in which users desire to put requests, if node is
 *	  NUMA_NO_NODE, it means users have no special requirement.
 *
 * crypto_alloc_tfm_node() will first attempt to locate an already loaded
 * algorithm.  If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias.  If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly.  A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of a non-determinate type.  Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_skcipher().
 *
 * In case of error the return value is an error pointer.
 */
void *crypto_alloc_tfm_node(const char *alg_name,
                            const struct crypto_type *frontend, u32 type,
                            u32 mask, int node)
{
        void *tfm;
        int err;

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_find_alg(alg_name, frontend, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = crypto_create_tfm_node(alg, frontend, node);
                if (!IS_ERR(tfm))
                        return tfm;

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);

/*
 * crypto_destroy_tfm - Free crypto transform
 * @mem: Start of tfm slab
 * @tfm: Transform to free
 *
 * This function frees up the transform and any associated resources,
 * then drops the refcount on the associated algorithm.
 */
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
        struct crypto_alg *alg;

        if (IS_ERR_OR_NULL(mem))
                return;

        alg = tfm->__crt_alg;

        if (!tfm->exit && alg->cra_exit)
                alg->cra_exit(tfm);
        crypto_exit_ops(tfm);
        crypto_mod_put(alg);
        kfree_sensitive(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
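
/*
 * Return 1 if an algorithm matching @name, @type and @mask exists (or
 * can be loaded/instantiated on demand), 0 otherwise.  The reference
 * taken by the lookup is dropped again immediately.
 */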
int crypto_has_alg(const char *name, u32 type, u32 mask)
{
        int ret = 0;
        struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);

        if (!IS_ERR(alg)) {
                crypto_mod_put(alg);
                ret = 1;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);

void crypto_req_done(void *data, int err)
{
        struct crypto_wait *wait = data;

        if (err == -EINPROGRESS)
                return;

        wait->err = err;
        complete(&wait->completion);
}
EXPORT_SYMBOL_GPL(crypto_req_done);
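
/*
 * Illustrative sketch (not part of this file) of the usual synchronous
 * wait pattern built on crypto_req_done() and struct crypto_wait; the
 * request setup is elided and the skcipher calls are examples only:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */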

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");