mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-28 13:34:38 +08:00
dc89997264
Always use crypto_has_comp() so that crypto can lookup module, call usermodhelper to load the modules, wait for usermodhelper to finish and so on. Otherwise crypto will do all of these steps under CPU hot-plug lock and this looks like too much stuff to handle under the CPU hot-plug lock. Besides this can end up in a deadlock when usermodhelper triggers a code path that attempts to lock the CPU hot-plug lock, that zram already holds. An example of such deadlock: - path A. zram grabs CPU hot-plug lock, execs /sbin/modprobe from crypto and waits for modprobe to finish disksize_store zcomp_create __cpuhp_state_add_instance __cpuhp_state_add_instance_cpuslocked zcomp_cpu_up_prepare crypto_alloc_base crypto_alg_mod_lookup call_usermodehelper_exec wait_for_completion_killable do_wait_for_common schedule - path B. async work kthread that brings in scsi device. It wants to register CPUHP states at some point, and it needs the CPU hot-plug lock for that, which is owned by zram. async_run_entry_fn scsi_probe_and_add_lun scsi_mq_alloc_queue blk_mq_init_queue blk_mq_init_allocated_queue blk_mq_realloc_hw_ctxs __cpuhp_state_add_instance __cpuhp_state_add_instance_cpuslocked mutex_lock schedule - path C. modprobe sleeps, waiting for all aync works to finish. load_module do_init_module async_synchronize_full async_synchronize_cookie_domain schedule [senozhatsky@chromium.org: add comment] Link: https://lkml.kernel.org/r/20220624060606.1014474-1-senozhatsky@chromium.org Link: https://lkml.kernel.org/r/20220622023501.517125-1-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Nitin Gupta <ngupta@vflare.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
234 lines
5.7 KiB
C
234 lines
5.7 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
/*
|
|
* Copyright (C) 2014 Sergey Senozhatsky.
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/string.h>
|
|
#include <linux/err.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/wait.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/crypto.h>
|
|
|
|
#include "zcomp.h"
|
|
|
|
/*
 * Compression backends that zram knows by name. Only algorithms whose
 * crypto driver is enabled in the kernel config are listed; the table
 * is used by zcomp_available_show() to render the sysfs listing.
 */
static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZO)
	"lzo",
	"lzo-rle",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
	"lz4",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
	"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
	"842",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
	"zstd",
#endif
};
|
|
|
|
static void zcomp_strm_free(struct zcomp_strm *zstrm)
|
|
{
|
|
if (!IS_ERR_OR_NULL(zstrm->tfm))
|
|
crypto_free_comp(zstrm->tfm);
|
|
free_pages((unsigned long)zstrm->buffer, 1);
|
|
zstrm->tfm = NULL;
|
|
zstrm->buffer = NULL;
|
|
}
|
|
|
|
/*
|
|
* Initialize zcomp_strm structure with ->tfm initialized by backend, and
|
|
* ->buffer. Return a negative value on error.
|
|
*/
|
|
static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
|
|
{
|
|
zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
|
|
/*
|
|
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
|
|
* case when compressed size is larger than the original one
|
|
*/
|
|
zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
|
|
if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
|
|
zcomp_strm_free(zstrm);
|
|
return -ENOMEM;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Return true when the crypto API provides a compression transform
 * named @comp; crypto_has_comp() returns 1 in that case and, per the
 * call site in zcomp_create(), may load the backing module on demand.
 */
bool zcomp_available_algorithm(const char *comp)
{
	/*
	 * Crypto does not ignore a trailing new line symbol,
	 * so make sure you don't supply a string containing
	 * one.
	 * This also means that we permit zcomp initialisation
	 * with any compressing algorithm known to crypto api.
	 */
	return crypto_has_comp(comp, 0, 0) == 1;
}
|
|
|
|
/* show available compressors */
|
|
ssize_t zcomp_available_show(const char *comp, char *buf)
|
|
{
|
|
bool known_algorithm = false;
|
|
ssize_t sz = 0;
|
|
int i;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(backends); i++) {
|
|
if (!strcmp(comp, backends[i])) {
|
|
known_algorithm = true;
|
|
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
|
|
"[%s] ", backends[i]);
|
|
} else {
|
|
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
|
|
"%s ", backends[i]);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Out-of-tree module known to crypto api or a missing
|
|
* entry in `backends'.
|
|
*/
|
|
if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
|
|
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
|
|
"[%s] ", comp);
|
|
|
|
sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
|
|
return sz;
|
|
}
|
|
|
|
/*
 * Acquire the calling CPU's compression stream. Takes the stream's
 * local lock before returning the per-CPU pointer; callers must
 * release it with zcomp_stream_put().
 */
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	local_lock(&comp->stream->lock);
	return this_cpu_ptr(comp->stream);
}
|
|
|
|
/* Release the stream obtained from zcomp_stream_get(). */
void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}
|
|
|
|
/*
 * Compress one PAGE_SIZE source page into zstrm->buffer.
 * @zstrm:   stream obtained via zcomp_stream_get()
 * @src:     source page, PAGE_SIZE bytes
 * @dst_len: on return, the compressed length reported by the backend
 *
 * Returns the crypto_comp_compress() result (0 on success, negative
 * error otherwise).
 */
int zcomp_compress(struct zcomp_strm *zstrm,
		const void *src, unsigned int *dst_len)
{
	/*
	 * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
	 * because sometimes we can end up having a bigger compressed data
	 * due to various reasons: for example compression algorithms tend
	 * to add some padding to the compressed buffer. Speaking of padding,
	 * comp algorithm `842' pads the compressed length to multiple of 8
	 * and returns -ENOSPC when the dst memory is not big enough, which
	 * is not something that ZRAM wants to see. We can handle the
	 * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
	 * receive -ERRNO from the compressing backend we can't help it
	 * anymore. To make `842' happy we need to tell the exact size of
	 * the dst buffer, zram_drv will take care of the fact that
	 * compressed buffer is too big.
	 */
	*dst_len = PAGE_SIZE * 2;

	return crypto_comp_compress(zstrm->tfm,
			src, PAGE_SIZE,
			zstrm->buffer, dst_len);
}
|
|
|
|
int zcomp_decompress(struct zcomp_strm *zstrm,
|
|
const void *src, unsigned int src_len, void *dst)
|
|
{
|
|
unsigned int dst_len = PAGE_SIZE;
|
|
|
|
return crypto_comp_decompress(zstrm->tfm,
|
|
src, src_len,
|
|
dst, &dst_len);
|
|
}
|
|
|
|
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
|
|
{
|
|
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
|
|
struct zcomp_strm *zstrm;
|
|
int ret;
|
|
|
|
zstrm = per_cpu_ptr(comp->stream, cpu);
|
|
local_lock_init(&zstrm->lock);
|
|
|
|
ret = zcomp_strm_init(zstrm, comp);
|
|
if (ret)
|
|
pr_err("Can't allocate a compression stream\n");
|
|
return ret;
|
|
}
|
|
|
|
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
|
|
{
|
|
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
|
|
struct zcomp_strm *zstrm;
|
|
|
|
zstrm = per_cpu_ptr(comp->stream, cpu);
|
|
zcomp_strm_free(zstrm);
|
|
return 0;
|
|
}
|
|
|
|
static int zcomp_init(struct zcomp *comp)
|
|
{
|
|
int ret;
|
|
|
|
comp->stream = alloc_percpu(struct zcomp_strm);
|
|
if (!comp->stream)
|
|
return -ENOMEM;
|
|
|
|
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
|
|
if (ret < 0)
|
|
goto cleanup;
|
|
return 0;
|
|
|
|
cleanup:
|
|
free_percpu(comp->stream);
|
|
return ret;
|
|
}
|
|
|
|
/*
 * Tear down a zcomp created by zcomp_create(): detach it from the
 * CPU hotplug state machine (triggering the per-CPU teardown), then
 * release the per-CPU storage and the zcomp itself.
 */
void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	free_percpu(comp->stream);
	kfree(comp);
}
|
|
|
|
/*
|
|
* search available compressors for requested algorithm.
|
|
* allocate new zcomp and initialize it. return compressing
|
|
* backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
|
|
* if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
|
|
* case of allocation error, or any other error potentially
|
|
* returned by zcomp_init().
|
|
*/
|
|
struct zcomp *zcomp_create(const char *compress)
|
|
{
|
|
struct zcomp *comp;
|
|
int error;
|
|
|
|
/*
|
|
* Crypto API will execute /sbin/modprobe if the compression module
|
|
* is not loaded yet. We must do it here, otherwise we are about to
|
|
* call /sbin/modprobe under CPU hot-plug lock.
|
|
*/
|
|
if (!zcomp_available_algorithm(compress))
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
|
|
if (!comp)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
comp->name = compress;
|
|
error = zcomp_init(comp);
|
|
if (error) {
|
|
kfree(comp);
|
|
return ERR_PTR(error);
|
|
}
|
|
return comp;
|
|
}
|