2019-07-23 00:26:22 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/*
|
2022-03-24 09:29:04 +08:00
|
|
|
* Data verification functions, i.e. hooks for ->readahead()
|
2019-07-23 00:26:22 +08:00
|
|
|
*
|
|
|
|
* Copyright 2019 Google LLC
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "fsverity_private.h"
|
|
|
|
|
|
|
|
#include <crypto/hash.h>
|
|
|
|
#include <linux/bio.h>
|
|
|
|
|
|
|
|
static struct workqueue_struct *fsverity_read_workqueue;
|
|
|
|
|
2022-12-24 04:36:33 +08:00
|
|
|
/*
|
|
|
|
* Returns true if the hash block with index @hblock_idx in the tree, located in
|
|
|
|
* @hpage, has already been verified.
|
|
|
|
*/
|
|
|
|
/*
 * is_hash_block_verified() - check whether a Merkle tree hash block was
 * already verified, re-arming verification for newly instantiated pages
 * @vi: the file's fsverity info
 * @hpage: the page containing the hash block
 * @hblock_idx: index of the hash block in the tree overall
 *
 * Returns true if the hash block with index @hblock_idx in the tree, located in
 * @hpage, has already been verified.  Returns false (after clearing the
 * page's bitmap bits, when a bitmap is in use) if the block still needs to
 * be verified.  May be called concurrently on the same page; see the
 * memory-barrier discussion below.
 */
static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
				   unsigned long hblock_idx)
{
	unsigned int blocks_per_page;
	unsigned int i;

	/*
	 * When the Merkle tree block size and page size are the same, then the
	 * ->hash_block_verified bitmap isn't allocated, and we use PG_checked
	 * to directly indicate whether the page's block has been verified.
	 *
	 * Using PG_checked also guarantees that we re-verify hash pages that
	 * get evicted and re-instantiated from the backing storage, as new
	 * pages always start out with PG_checked cleared.
	 */
	if (!vi->hash_block_verified)
		return PageChecked(hpage);

	/*
	 * When the Merkle tree block size and page size differ, we use a bitmap
	 * to indicate whether each hash block has been verified.
	 *
	 * However, we still need to ensure that hash pages that get evicted and
	 * re-instantiated from the backing storage are re-verified.  To do
	 * this, we use PG_checked again, but now it doesn't really mean
	 * "checked".  Instead, now it just serves as an indicator for whether
	 * the hash page is newly instantiated or not.  If the page is new, as
	 * indicated by PG_checked=0, we clear the bitmap bits for the page's
	 * blocks since they are untrustworthy, then set PG_checked=1.
	 * Otherwise we return the bitmap bit for the requested block.
	 *
	 * Multiple threads may execute this code concurrently on the same page.
	 * This is safe because we use memory barriers to ensure that if a
	 * thread sees PG_checked=1, then it also sees the associated bitmap
	 * clearing to have occurred.  Also, all writes and their corresponding
	 * reads are atomic, and all writes are safe to repeat in the event that
	 * multiple threads get into the PG_checked=0 section.  (Clearing a
	 * bitmap bit again at worst causes a hash block to be verified
	 * redundantly.  That event should be very rare, so it's not worth using
	 * a lock to avoid.  Setting PG_checked again has no effect.)
	 */
	if (PageChecked(hpage)) {
		/*
		 * A read memory barrier is needed here to give ACQUIRE
		 * semantics to the above PageChecked() test.  It pairs with
		 * the smp_wmb() before SetPageChecked() below.
		 */
		smp_rmb();
		return test_bit(hblock_idx, vi->hash_block_verified);
	}
	/*
	 * New page: invalidate the bitmap bits for every hash block in this
	 * page, since the page's prior verification state (if any) was lost
	 * when it was evicted.  Round down to the first block of the page.
	 */
	blocks_per_page = vi->tree_params.blocks_per_page;
	hblock_idx = round_down(hblock_idx, blocks_per_page);
	for (i = 0; i < blocks_per_page; i++)
		clear_bit(hblock_idx + i, vi->hash_block_verified);
	/*
	 * A write memory barrier is needed here to give RELEASE semantics to
	 * the below SetPageChecked() operation.
	 */
	smp_wmb();
	SetPageChecked(hpage);
	/* The caller must now (re-)verify this hash block. */
	return false;
}
|
|
|
|
|
2019-07-23 00:26:22 +08:00
|
|
|
/*
|
2022-12-24 04:36:33 +08:00
|
|
|
* Verify a single data block against the file's Merkle tree.
|
2019-07-23 00:26:22 +08:00
|
|
|
*
|
|
|
|
* In principle, we need to verify the entire path to the root node. However,
|
2022-12-24 04:36:33 +08:00
|
|
|
* for efficiency the filesystem may cache the hash blocks. Therefore we need
|
|
|
|
* only ascend the tree until an already-verified hash block is seen, and then
|
|
|
|
* verify the path to that block.
|
2019-07-23 00:26:22 +08:00
|
|
|
*
|
2022-12-24 04:36:33 +08:00
|
|
|
* Return: %true if the data block is valid, else %false.
|
2019-07-23 00:26:22 +08:00
|
|
|
*/
|
2022-12-24 04:36:33 +08:00
|
|
|
static bool
|
|
|
|
verify_data_block(struct inode *inode, struct fsverity_info *vi,
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
const void *data, u64 data_pos, unsigned long max_ra_pages)
|
2019-07-23 00:26:22 +08:00
|
|
|
{
|
|
|
|
const struct merkle_tree_params *params = &vi->tree_params;
|
|
|
|
const unsigned int hsize = params->digest_size;
|
|
|
|
int level;
|
|
|
|
u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
|
|
|
|
const u8 *want_hash;
|
|
|
|
u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
|
2022-12-24 04:36:33 +08:00
|
|
|
/* The hash blocks that are traversed, indexed by level */
|
|
|
|
struct {
|
|
|
|
/* Page containing the hash block */
|
|
|
|
struct page *page;
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
/* Mapped address of the hash block (will be within @page) */
|
|
|
|
const void *addr;
|
2022-12-24 04:36:33 +08:00
|
|
|
/* Index of the hash block in the tree overall */
|
|
|
|
unsigned long index;
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
/* Byte offset of the wanted hash relative to @addr */
|
2022-12-24 04:36:33 +08:00
|
|
|
unsigned int hoffset;
|
|
|
|
} hblocks[FS_VERITY_MAX_LEVELS];
|
|
|
|
/*
|
|
|
|
* The index of the previous level's block within that level; also the
|
|
|
|
* index of that block's hash within the current level.
|
|
|
|
*/
|
|
|
|
u64 hidx = data_pos >> params->log_blocksize;
|
2019-07-23 00:26:22 +08:00
|
|
|
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
/* Up to 1 + FS_VERITY_MAX_LEVELS pages may be mapped at once */
|
|
|
|
BUILD_BUG_ON(1 + FS_VERITY_MAX_LEVELS > KM_MAX_IDX);
|
|
|
|
|
2022-12-24 04:36:33 +08:00
|
|
|
if (unlikely(data_pos >= inode->i_size)) {
|
|
|
|
/*
|
|
|
|
* This can happen in the data page spanning EOF when the Merkle
|
|
|
|
* tree block size is less than the page size. The Merkle tree
|
|
|
|
* doesn't cover data blocks fully past EOF. But the entire
|
|
|
|
* page spanning EOF can be visible to userspace via a mmap, and
|
|
|
|
* any part past EOF should be all zeroes. Therefore, we need
|
|
|
|
* to verify that any data blocks fully past EOF are all zeroes.
|
|
|
|
*/
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
if (memchr_inv(data, 0, params->block_size)) {
|
|
|
|
fsverity_err(inode,
|
|
|
|
"FILE CORRUPTED! Data past EOF is not zeroed");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
2022-12-24 04:36:33 +08:00
|
|
|
}
|
2019-07-23 00:26:22 +08:00
|
|
|
|
|
|
|
/*
|
2022-12-24 04:36:33 +08:00
|
|
|
* Starting at the leaf level, ascend the tree saving hash blocks along
|
|
|
|
* the way until we find a hash block that has already been verified, or
|
|
|
|
* until we reach the root.
|
2019-07-23 00:26:22 +08:00
|
|
|
*/
|
|
|
|
for (level = 0; level < params->num_levels; level++) {
|
2022-12-24 04:36:33 +08:00
|
|
|
unsigned long next_hidx;
|
|
|
|
unsigned long hblock_idx;
|
|
|
|
pgoff_t hpage_idx;
|
|
|
|
unsigned int hblock_offset_in_page;
|
2019-07-23 00:26:22 +08:00
|
|
|
unsigned int hoffset;
|
|
|
|
struct page *hpage;
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
const void *haddr;
|
2019-07-23 00:26:22 +08:00
|
|
|
|
2022-12-24 04:36:33 +08:00
|
|
|
/*
|
|
|
|
* The index of the block in the current level; also the index
|
|
|
|
* of that block's hash within the next level.
|
|
|
|
*/
|
|
|
|
next_hidx = hidx >> params->log_arity;
|
|
|
|
|
|
|
|
/* Index of the hash block in the tree overall */
|
|
|
|
hblock_idx = params->level_start[level] + next_hidx;
|
|
|
|
|
|
|
|
/* Index of the hash page in the tree overall */
|
|
|
|
hpage_idx = hblock_idx >> params->log_blocks_per_page;
|
|
|
|
|
|
|
|
/* Byte offset of the hash block within the page */
|
|
|
|
hblock_offset_in_page =
|
|
|
|
(hblock_idx << params->log_blocksize) & ~PAGE_MASK;
|
|
|
|
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
/* Byte offset of the hash within the block */
|
|
|
|
hoffset = (hidx << params->log_digestsize) &
|
|
|
|
(params->block_size - 1);
|
2019-07-23 00:26:22 +08:00
|
|
|
|
2022-12-24 04:36:33 +08:00
|
|
|
hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
|
|
|
|
hpage_idx, level == 0 ? min(max_ra_pages,
|
|
|
|
params->tree_pages - hpage_idx) : 0);
|
2019-07-23 00:26:22 +08:00
|
|
|
if (IS_ERR(hpage)) {
|
|
|
|
fsverity_err(inode,
|
2023-06-04 10:23:12 +08:00
|
|
|
"Error %ld reading Merkle tree page %lu",
|
|
|
|
PTR_ERR(hpage), hpage_idx);
|
|
|
|
goto error;
|
2019-07-23 00:26:22 +08:00
|
|
|
}
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
haddr = kmap_local_page(hpage) + hblock_offset_in_page;
|
2022-12-24 04:36:33 +08:00
|
|
|
if (is_hash_block_verified(vi, hpage, hblock_idx)) {
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
memcpy(_want_hash, haddr + hoffset, hsize);
|
2019-07-23 00:26:22 +08:00
|
|
|
want_hash = _want_hash;
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
kunmap_local(haddr);
|
2019-07-23 00:26:22 +08:00
|
|
|
put_page(hpage);
|
|
|
|
goto descend;
|
|
|
|
}
|
2022-12-24 04:36:33 +08:00
|
|
|
hblocks[level].page = hpage;
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
hblocks[level].addr = haddr;
|
2022-12-24 04:36:33 +08:00
|
|
|
hblocks[level].index = hblock_idx;
|
|
|
|
hblocks[level].hoffset = hoffset;
|
|
|
|
hidx = next_hidx;
|
2019-07-23 00:26:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
want_hash = vi->root_hash;
|
|
|
|
descend:
|
2022-12-24 04:36:32 +08:00
|
|
|
/* Descend the tree verifying hash blocks. */
|
2019-07-23 00:26:22 +08:00
|
|
|
for (; level > 0; level--) {
|
2022-12-24 04:36:33 +08:00
|
|
|
struct page *hpage = hblocks[level - 1].page;
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
const void *haddr = hblocks[level - 1].addr;
|
2022-12-24 04:36:33 +08:00
|
|
|
unsigned long hblock_idx = hblocks[level - 1].index;
|
|
|
|
unsigned int hoffset = hblocks[level - 1].hoffset;
|
2019-07-23 00:26:22 +08:00
|
|
|
|
2023-06-04 10:23:12 +08:00
|
|
|
if (fsverity_hash_block(params, inode, haddr, real_hash) != 0)
|
|
|
|
goto error;
|
|
|
|
if (memcmp(want_hash, real_hash, hsize) != 0)
|
|
|
|
goto corrupted;
|
2022-12-24 04:36:33 +08:00
|
|
|
/*
|
|
|
|
* Mark the hash block as verified. This must be atomic and
|
|
|
|
* idempotent, as the same hash block might be verified by
|
|
|
|
* multiple threads concurrently.
|
|
|
|
*/
|
|
|
|
if (vi->hash_block_verified)
|
|
|
|
set_bit(hblock_idx, vi->hash_block_verified);
|
|
|
|
else
|
|
|
|
SetPageChecked(hpage);
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
memcpy(_want_hash, haddr + hoffset, hsize);
|
2019-07-23 00:26:22 +08:00
|
|
|
want_hash = _want_hash;
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
kunmap_local(haddr);
|
2019-07-23 00:26:22 +08:00
|
|
|
put_page(hpage);
|
|
|
|
}
|
|
|
|
|
2022-12-24 04:36:32 +08:00
|
|
|
/* Finally, verify the data block. */
|
2023-06-04 10:23:12 +08:00
|
|
|
if (fsverity_hash_block(params, inode, data, real_hash) != 0)
|
|
|
|
goto error;
|
|
|
|
if (memcmp(want_hash, real_hash, hsize) != 0)
|
|
|
|
goto corrupted;
|
|
|
|
return true;
|
|
|
|
|
|
|
|
corrupted:
|
|
|
|
fsverity_err(inode,
|
|
|
|
"FILE CORRUPTED! pos=%llu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
|
|
|
|
data_pos, level - 1,
|
|
|
|
params->hash_alg->name, hsize, want_hash,
|
|
|
|
params->hash_alg->name, hsize, real_hash);
|
|
|
|
error:
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
for (; level > 0; level--) {
|
|
|
|
kunmap_local(hblocks[level - 1].addr);
|
2022-12-24 04:36:33 +08:00
|
|
|
put_page(hblocks[level - 1].page);
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
}
|
2023-06-04 10:23:12 +08:00
|
|
|
return false;
|
2019-07-23 00:26:22 +08:00
|
|
|
}
|
|
|
|
|
2022-12-24 04:36:33 +08:00
|
|
|
static bool
|
2023-06-04 10:21:01 +08:00
|
|
|
verify_data_blocks(struct folio *data_folio, size_t len, size_t offset,
|
|
|
|
unsigned long max_ra_pages)
|
2022-12-24 04:36:33 +08:00
|
|
|
{
|
2023-06-04 10:21:01 +08:00
|
|
|
struct inode *inode = data_folio->mapping->host;
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
struct fsverity_info *vi = inode->i_verity_info;
|
2022-12-24 04:36:33 +08:00
|
|
|
const unsigned int block_size = vi->tree_params.block_size;
|
2023-01-28 06:15:29 +08:00
|
|
|
u64 pos = (u64)data_folio->index << PAGE_SHIFT;
|
2022-12-24 04:36:33 +08:00
|
|
|
|
|
|
|
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size)))
|
|
|
|
return false;
|
2023-01-28 06:15:29 +08:00
|
|
|
if (WARN_ON_ONCE(!folio_test_locked(data_folio) ||
|
|
|
|
folio_test_uptodate(data_folio)))
|
2022-12-24 04:36:33 +08:00
|
|
|
return false;
|
|
|
|
do {
|
fsverity: use shash API instead of ahash API
The "ahash" API, like the other scatterlist-based crypto APIs such as
"skcipher", comes with some well-known limitations. First, it can't
easily be used with vmalloc addresses. Second, the request struct can't
be allocated on the stack. This adds complexity and a possible failure
point that needs to be worked around, e.g. using a mempool.
The only benefit of ahash over "shash" is that ahash is needed to access
traditional memory-to-memory crypto accelerators, i.e. drivers/crypto/.
However, this style of crypto acceleration has largely fallen out of
favor and been superseded by CPU-based acceleration or inline crypto
engines. Also, ahash needs to be used asynchronously to take full
advantage of such hardware, but fs/verity/ has never done this.
On all systems that aren't actually using one of these ahash-only crypto
accelerators, ahash just adds unnecessary overhead as it sits between
the user and the underlying shash algorithms.
Also, XFS is planned to cache fsverity Merkle tree blocks in the
existing XFS buffer cache. As a result, it will be possible for a
single Merkle tree block to be split across discontiguous pages
(https://lore.kernel.org/r/20230405233753.GU3223426@dread.disaster.area).
This data will need to be hashed. It is easiest to work with a vmapped
address in this case. However, ahash is incompatible with this.
Therefore, let's convert fs/verity/ from ahash to shash. This
simplifies the code, and it should also slightly improve performance for
everyone who wasn't actually using one of these ahash-only crypto
accelerators, i.e. almost everyone (or maybe even everyone)!
Link: https://lore.kernel.org/r/20230516052306.99600-1-ebiggers@kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2023-05-16 13:12:16 +08:00
|
|
|
void *data;
|
|
|
|
bool valid;
|
|
|
|
|
|
|
|
data = kmap_local_folio(data_folio, offset);
|
|
|
|
valid = verify_data_block(inode, vi, data, pos + offset,
|
|
|
|
max_ra_pages);
|
|
|
|
kunmap_local(data);
|
|
|
|
if (!valid)
|
2022-12-24 04:36:33 +08:00
|
|
|
return false;
|
|
|
|
offset += block_size;
|
|
|
|
len -= block_size;
|
|
|
|
} while (len);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-07-23 00:26:22 +08:00
|
|
|
/**
|
2023-01-28 06:15:29 +08:00
|
|
|
* fsverity_verify_blocks() - verify data in a folio
|
|
|
|
* @folio: the folio containing the data to verify
|
|
|
|
* @len: the length of the data to verify in the folio
|
|
|
|
* @offset: the offset of the data to verify in the folio
|
2019-07-23 00:26:22 +08:00
|
|
|
*
|
2022-12-24 04:36:33 +08:00
|
|
|
* Verify data that has just been read from a verity file. The data must be
|
2023-01-28 06:15:29 +08:00
|
|
|
* located in a pagecache folio that is still locked and not yet uptodate. The
|
2022-12-24 04:36:33 +08:00
|
|
|
* length and offset of the data must be Merkle tree block size aligned.
|
2019-07-23 00:26:22 +08:00
|
|
|
*
|
2022-12-24 04:36:33 +08:00
|
|
|
* Return: %true if the data is valid, else %false.
|
2019-07-23 00:26:22 +08:00
|
|
|
*/
|
2023-01-28 06:15:29 +08:00
|
|
|
bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
|
2019-07-23 00:26:22 +08:00
|
|
|
{
|
2023-06-04 10:21:01 +08:00
|
|
|
return verify_data_blocks(folio, len, offset, 0);
|
2019-07-23 00:26:22 +08:00
|
|
|
}
|
2022-12-24 04:36:33 +08:00
|
|
|
EXPORT_SYMBOL_GPL(fsverity_verify_blocks);
|
2019-07-23 00:26:22 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_BLOCK
/**
 * fsverity_verify_bio() - verify a 'read' bio that has just completed
 * @bio: the bio to verify
 *
 * Verify the bio's data against the file's Merkle tree.  All bio data segments
 * must be aligned to the file's Merkle tree block size.  If any data fails
 * verification, then bio->bi_status is set to an error status.
 *
 * This is a helper function for use by the ->readahead() method of filesystems
 * that issue bios to read data directly into the page cache.  Filesystems that
 * populate the page cache without issuing bios (e.g. non block-based
 * filesystems) must instead call fsverity_verify_page() directly on each page.
 * All filesystems must also call fsverity_verify_page() on holes.
 */
void fsverity_verify_bio(struct bio *bio)
{
	struct folio_iter iter;
	unsigned long tree_ra_pages = 0;

	/*
	 * For data readahead bios, also read ahead in the first (largest)
	 * level of the Merkle tree.  Namely, when a Merkle tree page is read,
	 * try to piggy-back up to 1/4 the number of data pages onto the
	 * request.  This greatly reduces the number of I/O requests made to
	 * the Merkle tree, improving sequential read performance.
	 */
	if (bio->bi_opf & REQ_RAHEAD)
		tree_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);

	bio_for_each_folio_all(iter, bio) {
		if (!verify_data_blocks(iter.folio, iter.length, iter.offset,
					tree_ra_pages)) {
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(fsverity_verify_bio);
#endif /* CONFIG_BLOCK */
|
|
|
|
|
|
|
|
/**
|
|
|
|
* fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
|
2020-05-12 03:21:17 +08:00
|
|
|
* @work: the work to enqueue
|
2019-07-23 00:26:22 +08:00
|
|
|
*
|
|
|
|
* Enqueue verification work for asynchronous processing.
|
|
|
|
*/
|
|
|
|
void fsverity_enqueue_verify_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
queue_work(fsverity_read_workqueue, work);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
|
|
|
|
|
2023-07-06 05:27:42 +08:00
|
|
|
void __init fsverity_init_workqueue(void)
{
	struct workqueue_struct *wq;

	/*
	 * Use a high-priority workqueue to prioritize verification work, which
	 * blocks reads from completing, over regular application tasks.
	 *
	 * For performance reasons, don't use an unbound workqueue.  Using an
	 * unbound workqueue for crypto operations causes excessive scheduler
	 * latency on ARM64.
	 */
	wq = alloc_workqueue("fsverity_read_queue", WQ_HIGHPRI,
			     num_online_cpus());
	if (!wq)
		panic("failed to allocate fsverity_read_queue");
	fsverity_read_workqueue = wq;
}
|