// SPDX-License-Identifier: GPL-2.0
/*
 * Ioctl to enable verity on a file
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <crypto/hash.h>
#include <linux/mount.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

struct block_buffer {
	u32 filled;
	bool is_root_hash;
	u8 *data;
};

/* Hash a block, writing the result to the next level's pending block buffer. */
static int hash_one_block(struct inode *inode,
			  const struct merkle_tree_params *params,
			  struct block_buffer *cur)
{
	struct block_buffer *next = cur + 1;
	int err;

	/*
	 * Safety check to prevent a buffer overflow in case of a filesystem bug
	 * that allows the file size to change despite deny_write_access(), or a
	 * bug in the Merkle tree logic itself
	 */
	if (WARN_ON_ONCE(next->is_root_hash && next->filled != 0))
		return -EINVAL;

	/* Zero-pad the block if it's shorter than the block size. */
	memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);

	err = fsverity_hash_block(params, inode, cur->data,
				  &next->data[next->filled]);
	if (err)
		return err;
	next->filled += params->digest_size;
	cur->filled = 0;
	return 0;
}

static int write_merkle_tree_block(struct inode *inode, const u8 *buf,
				   unsigned long index,
				   const struct merkle_tree_params *params)
{
	u64 pos = (u64)index << params->log_blocksize;
	int err;

	err = inode->i_sb->s_vop->write_merkle_tree_block(inode, buf, pos,
							  params->block_size);
	if (err)
		fsverity_err(inode, "Error %d writing Merkle tree block %lu",
			     err, index);
	return err;
}

/*
 * Build the Merkle tree for the given file using the given parameters, and
 * return the root hash in @root_hash.
 *
 * The tree is written to a filesystem-specific location as determined by the
 * ->write_merkle_tree_block() method. However, the blocks that comprise the
 * tree are the same for all filesystems.
 */
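/*
 * Illustrative sizing example (added for exposition; the numbers are an
 * assumption, not taken from this file): with 4096-byte blocks and SHA-256
 * (32-byte digests), each tree block holds 128 hashes. A 1 MiB file has 256
 * data blocks, so level 0 needs 2 tree blocks for the 256 data-block hashes,
 * and level 1 (the top level) needs 1 block for the 2 level-0 block hashes;
 * the root hash is then the hash of that single level-1 block, so
 * num_levels == 2.
 */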
static int build_merkle_tree(struct file *filp,
			     const struct merkle_tree_params *params,
			     u8 *root_hash)
{
	struct inode *inode = file_inode(filp);
	const u64 data_size = inode->i_size;
	const int num_levels = params->num_levels;
	struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {};
	struct block_buffer *buffers = &_buffers[1];
	unsigned long level_offset[FS_VERITY_MAX_LEVELS];
	int level;
	u64 offset;
	int err;

	if (data_size == 0) {
		/* Empty file is a special case; root hash is all 0's */
		memset(root_hash, 0, params->digest_size);
		return 0;
	}

	/*
	 * Allocate the block buffers. Buffer "-1" is for data blocks.
	 * Buffers 0 <= level < num_levels are for the actual tree levels.
	 * Buffer 'num_levels' is for the root hash.
	 */
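	/* buffers[-1] is valid: buffers points at _buffers[1]. */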
	for (level = -1; level < num_levels; level++) {
		buffers[level].data = kzalloc(params->block_size, GFP_KERNEL);
		if (!buffers[level].data) {
			err = -ENOMEM;
			goto out;
		}
	}
	buffers[num_levels].data = root_hash;
	buffers[num_levels].is_root_hash = true;

	BUILD_BUG_ON(sizeof(level_offset) != sizeof(params->level_start));
	memcpy(level_offset, params->level_start, sizeof(level_offset));

	/* Hash each data block, also hashing the tree blocks as they fill up */
	for (offset = 0; offset < data_size; offset += params->block_size) {
		ssize_t bytes_read;
		loff_t pos = offset;

		buffers[-1].filled = min_t(u64, params->block_size,
					   data_size - offset);
		bytes_read = __kernel_read(filp, buffers[-1].data,
					   buffers[-1].filled, &pos);
		if (bytes_read < 0) {
			err = bytes_read;
			fsverity_err(inode, "Error %d reading file data", err);
			goto out;
		}
		if (bytes_read != buffers[-1].filled) {
			err = -EINVAL;
			fsverity_err(inode, "Short read of file data");
			goto out;
		}
		err = hash_one_block(inode, params, &buffers[-1]);
		if (err)
			goto out;
		for (level = 0; level < num_levels; level++) {
			if (buffers[level].filled + params->digest_size <=
			    params->block_size) {
				/* Next block at @level isn't full yet */
				break;
			}
			/* Next block at @level is full */
			err = hash_one_block(inode, params, &buffers[level]);
			if (err)
				goto out;
			err = write_merkle_tree_block(inode,
						      buffers[level].data,
						      level_offset[level],
						      params);
			if (err)
				goto out;
			level_offset[level]++;
		}
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			goto out;
		}
		cond_resched();
	}

	/* Finish all nonempty pending tree blocks. */
	for (level = 0; level < num_levels; level++) {
		if (buffers[level].filled != 0) {
			err = hash_one_block(inode, params, &buffers[level]);
			if (err)
				goto out;
			err = write_merkle_tree_block(inode,
						      buffers[level].data,
						      level_offset[level],
						      params);
			if (err)
				goto out;
		}
	}

	/* The root hash was filled by the last call to hash_one_block(). */
	if (WARN_ON_ONCE(buffers[num_levels].filled != params->digest_size)) {
		err = -EINVAL;
		goto out;
	}
	err = 0;
out:
	for (level = -1; level < num_levels; level++)
		kfree(buffers[level].data);
	return err;
}

static int enable_verity(struct file *filp,
			 const struct fsverity_enable_arg *arg)
{
	struct inode *inode = file_inode(filp);
	const struct fsverity_operations *vops = inode->i_sb->s_vop;
	struct merkle_tree_params params = { };
	struct fsverity_descriptor *desc;
	size_t desc_size = struct_size(desc, signature, arg->sig_size);
	struct fsverity_info *vi;
	int err;

	/* Start initializing the fsverity_descriptor */
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	desc->version = 1;
	desc->hash_algorithm = arg->hash_algorithm;
	desc->log_blocksize = ilog2(arg->block_size);

	/* Get the salt if the user provided one */
	if (arg->salt_size &&
	    copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr),
			   arg->salt_size)) {
		err = -EFAULT;
		goto out;
	}
	desc->salt_size = arg->salt_size;

	/* Get the signature if the user provided one */
	if (arg->sig_size &&
	    copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr),
			   arg->sig_size)) {
		err = -EFAULT;
		goto out;
	}
	desc->sig_size = cpu_to_le32(arg->sig_size);

	desc->data_size = cpu_to_le64(inode->i_size);

	/* Prepare the Merkle tree parameters */
	err = fsverity_init_merkle_tree_params(&params, inode,
					       arg->hash_algorithm,
					       desc->log_blocksize,
					       desc->salt, desc->salt_size);
	if (err)
		goto out;

	/*
	 * Start enabling verity on this file, serialized by the inode lock.
	 * Fail if verity is already enabled or is already being enabled.
	 */
	inode_lock(inode);
	if (IS_VERITY(inode))
		err = -EEXIST;
	else
		err = vops->begin_enable_verity(filp);
	inode_unlock(inode);
	if (err)
		goto out;

	/*
	 * Build the Merkle tree. Don't hold the inode lock during this, since
	 * on huge files this may take a very long time and we don't want to
	 * force unrelated syscalls like chown() to block forever. We don't
	 * need the inode lock here because deny_write_access() already prevents
	 * the file from being written to or truncated, and we still serialize
	 * ->begin_enable_verity() and ->end_enable_verity() using the inode
	 * lock and only allow one process to be here at a time on a given file.
	 */
	BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
	err = build_merkle_tree(filp, &params, desc->root_hash);
	if (err) {
		fsverity_err(inode, "Error %d building Merkle tree", err);
		goto rollback;
	}

	/*
	 * Create the fsverity_info. Don't bother trying to save work by
	 * reusing the merkle_tree_params from above. Instead, just create the
	 * fsverity_info from the fsverity_descriptor as if it were just loaded
	 * from disk. This is simpler, and it serves as an extra check that the
	 * metadata we're writing is valid before actually enabling verity.
	 */
	vi = fsverity_create_info(inode, desc);
	if (IS_ERR(vi)) {
		err = PTR_ERR(vi);
		goto rollback;
	}

	/*
	 * Tell the filesystem to finish enabling verity on the file.
	 * Serialized with ->begin_enable_verity() by the inode lock.
	 */
	inode_lock(inode);
	err = vops->end_enable_verity(filp, desc, desc_size, params.tree_size);
	inode_unlock(inode);
	if (err) {
		fsverity_err(inode, "%ps() failed with err %d",
			     vops->end_enable_verity, err);
		fsverity_free_info(vi);
	} else if (WARN_ON_ONCE(!IS_VERITY(inode))) {
		err = -EINVAL;
		fsverity_free_info(vi);
	} else {
		/* Successfully enabled verity */

		/*
		 * Readers can start using ->i_verity_info immediately, so it
		 * can't be rolled back once set. So don't set it until just
		 * after the filesystem has successfully enabled verity.
		 */
		fsverity_set_info(inode, vi);
	}
out:
	kfree(params.hashstate);
	kfree(desc);
	return err;

rollback:
	inode_lock(inode);
	(void)vops->end_enable_verity(filp, NULL, 0, params.tree_size);
	inode_unlock(inode);
	goto out;
}

/**
 * fsverity_ioctl_enable() - enable verity on a file
 * @filp: file to enable verity on
 * @uarg: user pointer to fsverity_enable_arg
 *
 * Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of
 * Documentation/filesystems/fsverity.rst for the documentation.
 *
 * Return: 0 on success, -errno on failure
 */
int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
{
	struct inode *inode = file_inode(filp);
	struct fsverity_enable_arg arg;
	int err;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	if (arg.version != 1)
		return -EINVAL;

	if (arg.__reserved1 ||
	    memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
		return -EINVAL;

	if (!is_power_of_2(arg.block_size))
		return -EINVAL;

	if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt))
		return -EMSGSIZE;

	if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
		return -EMSGSIZE;

	/*
	 * Require a regular file with write access. But the actual fd must
	 * still be readonly so that we can lock out all writers. This is
	 * needed to guarantee that no writable fds exist to the file once it
	 * has verity enabled, and to stabilize the data being hashed.
	 */

	err = file_permission(filp, MAY_WRITE);
	if (err)
		return err;
	/*
	 * __kernel_read() is used while building the Merkle tree. So, we can't
	 * allow file descriptors that were opened for ioctl access only, using
	 * the special nonstandard access mode 3. O_RDONLY only, please!
	 */
	if (!(filp->f_mode & FMODE_READ))
		return -EBADF;

	if (IS_APPEND(inode))
		return -EPERM;

	if (S_ISDIR(inode->i_mode))
		return -EISDIR;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err) /* -EROFS */
		return err;

	err = deny_write_access(filp);
	if (err) /* -ETXTBSY */
		goto out_drop_write;

	err = enable_verity(filp, &arg);

	/*
	 * We no longer drop the inode's pagecache after enabling verity. This
	 * used to be done to try to avoid a race condition where pages could be
	 * evicted after being used in the Merkle tree construction, then
	 * re-instantiated by a concurrent read. Such pages are unverified, and
	 * the backing storage could have filled them with different content, so
	 * they shouldn't be used to fulfill reads once verity is enabled.
	 *
	 * But, dropping the pagecache has a big performance impact, and it
	 * doesn't fully solve the race condition anyway. So for those reasons,
	 * and also because this race condition isn't very important relatively
	 * speaking (especially for small-ish files, where the chance of a page
	 * being used, evicted, *and* re-instantiated all while enabling verity
	 * is quite small), we no longer drop the inode's pagecache.
	 */

	/*
	 * allow_write_access() is needed to pair with deny_write_access().
	 * Regardless, the filesystem won't allow writing to verity files.
	 */
	allow_write_access(filp);
out_drop_write:
	mnt_drop_write_file(filp);
	return err;
}
EXPORT_SYMBOL_GPL(fsverity_ioctl_enable);
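
/*
 * Illustrative userspace sketch (added for exposition, not part of the kernel
 * sources): one plausible way to invoke FS_IOC_ENABLE_VERITY on a read-only
 * fd, using struct fsverity_enable_arg from the <linux/fsverity.h> UAPI
 * header. Error handling and the optional salt/signature are omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <linux/fsverity.h>
 *
 *	// 'fd' must refer to a regular file opened O_RDONLY.
 *	int enable_verity_on(int fd)
 *	{
 *		struct fsverity_enable_arg arg;
 *
 *		memset(&arg, 0, sizeof(arg));	// also clears the reserved fields
 *		arg.version = 1;		// the only accepted version
 *		arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
 *		arg.block_size = 4096;		// must be a power of 2
 *		return ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
 *	}
 */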