
Merge tag 'for-4.15/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:
 "All fixes marked for stable:

   - Fix DM thinp btree corruption seen when inserting a new key/value
     pair into a full root node.

   - Fix DM thinp btree removal deadlock due to an artificially low number
     of allowed concurrent locks.

   - Fix possible DM crypt corruption if kernel keyring service is used.
     Only affects ciphers using the following IVs: essiv, lmk and tcw.

   - Two DM crypt device initialization error checking fixes.

   - Fix DM integrity to allow use of async ciphers that require DMA"

* tag 'for-4.15/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm crypt: fix error return code in crypt_ctr()
  dm crypt: wipe kernel key copy after IV initialization
  dm integrity: don't store cipher request on the stack
  dm crypt: fix crash by adding missing check for auth key size
  dm btree: fix serious bug in btree_split_beneath()
  dm thin metadata: THIN_MAX_CONCURRENT_LOCKS should be 6
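
The "wipe kernel key copy after IV initialization" change listed above comes down to one habit: once the key material has done its job (here, once the IV generator has been initialized from it), the in-memory copy is zeroed so it cannot be recovered from kernel memory later. Below is a minimal userspace C sketch of that pattern; the names are illustrative and this is not the dm-crypt code, which does the equivalent with memset() on cc->key and kzfree() on cc->key_string.

#include <stddef.h>
#include <string.h>

/* Calling memset() through a volatile function pointer keeps the compiler
 * from optimizing the wipe away, similar in spirit to the kernel's
 * memzero_explicit(). */
static void *(*const volatile wipe_memset)(void *, int, size_t) = memset;

/* Hypothetical helper: called as soon as the key has been consumed, e.g.
 * right after IV initialization, so the plaintext key does not linger. */
static void wipe_key_copy(unsigned char *key, size_t key_size)
{
        wipe_memset(key, 0, key_size);
}

After this fix dm-crypt applies the wipe in two places, at the end of crypt_ctr_cipher() and in the "key set" message handler, in both cases only when the key came from the kernel keyring (cc->key_string is set), as the hunks below show.
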
Committed by Linus Torvalds, 2018-01-19 15:16:49 -08:00 (commit 1cf55613a6)
4 changed files with 59 additions and 35 deletions

drivers/md/dm-crypt.c

@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
         /* Ignore extra keys (which are used for IV etc) */
         subkey_size = crypt_subkey_size(cc);
-        if (crypt_integrity_hmac(cc))
+        if (crypt_integrity_hmac(cc)) {
+                if (subkey_size < cc->key_mac_size)
+                        return -EINVAL;
                 crypt_copy_authenckey(cc->authenc_key, cc->key,
                                       subkey_size - cc->key_mac_size,
                                       cc->key_mac_size);
+        }
         for (i = 0; i < cc->tfms_count; i++) {
                 if (crypt_integrity_hmac(cc))
                         r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
         ret = crypt_setkey(cc);
-        /* wipe the kernel key payload copy in each case */
-        memset(cc->key, 0, cc->key_size * sizeof(u8));
         if (!ret) {
                 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
                 kzfree(cc->key_string);
@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
                 }
         }
+        /* wipe the kernel key payload copy */
+        if (cc->key_string)
+                memset(cc->key, 0, cc->key_size * sizeof(u8));
         return ret;
 }
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                                 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
                 if (!cc->tag_pool) {
                         ti->error = "Cannot allocate integrity tags mempool";
+                        ret = -ENOMEM;
                         goto bad;
                 }
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
                                 return ret;
                         if (cc->iv_gen_ops && cc->iv_gen_ops->init)
                                 ret = cc->iv_gen_ops->init(cc);
+                        /* wipe the kernel key payload copy */
+                        if (cc->key_string)
+                                memset(cc->key, 0, cc->key_size * sizeof(u8));
                         return ret;
                 }
                 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 static struct target_type crypt_target = {
         .name = "crypt",
-        .version = {1, 18, 0},
+        .version = {1, 18, 1},
         .module = THIS_MODULE,
         .ctr = crypt_ctr,
         .dtr = crypt_dtr,
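
On the auth key size check added to crypt_setkey() above: a combined authenc key holds the encryption key with the MAC key appended, and dm-crypt slices the MAC portion off the end. Without the new subkey_size check the subtraction underflows for a too-short key and the copy reads out of bounds, which is the crash this fix closes. Here is a standalone sketch of just that guard; it uses a simplified flat layout (illustrative names), not the RTA-encoded buffer that crypt_copy_authenckey() actually builds.

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Toy config: destination buffers for the two halves of a combined key. */
struct toy_authenc {
        unsigned char *enc_key;
        unsigned char *mac_key;
        size_t enc_key_size;
        size_t mac_key_size;
};

static int toy_split_authenc_key(struct toy_authenc *out,
                                 const unsigned char *key, size_t key_size)
{
        /* Without this check, key_size - mac_key_size underflows for a
         * short key and the memcpy()s below read past the buffer. */
        if (key_size < out->mac_key_size)
                return -EINVAL;

        out->enc_key_size = key_size - out->mac_key_size;
        memcpy(out->enc_key, key, out->enc_key_size);
        memcpy(out->mac_key, key + out->enc_key_size, out->mac_key_size);
        return 0;
}

The crypt_ctr() hunk in the same file is the usual companion bug for goto-style cleanup: every failure branch has to set ret before jumping to bad, otherwise the constructor can fall through and report whatever ret last held (here 0, i.e. success); the one-line ret = -ENOMEM addition fixes exactly that path.
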

drivers/md/dm-integrity.c

@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
         int r = 0;
         unsigned i;
         __u64 journal_pages, journal_desc_size, journal_tree_size;
-        unsigned char *crypt_data = NULL;
+        unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+        struct skcipher_request *req = NULL;
         ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
         ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                 if (blocksize == 1) {
                         struct scatterlist *sg;
-                        SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                        unsigned char iv[ivsize];
-                        skcipher_request_set_tfm(req, ic->journal_crypt);
+                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                        if (!req) {
+                                *error = "Could not allocate crypt request";
+                                r = -ENOMEM;
+                                goto bad;
+                        }
+                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                        if (!crypt_iv) {
+                                *error = "Could not allocate iv";
+                                r = -ENOMEM;
+                                goto bad;
+                        }
                         ic->journal_xor = dm_integrity_alloc_page_list(ic);
                         if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                 sg_set_buf(&sg[i], va, PAGE_SIZE);
                         }
                         sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
-                        memset(iv, 0x00, ivsize);
+                        memset(crypt_iv, 0x00, ivsize);
-                        skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+                        skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
                         init_completion(&comp.comp);
                         comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                         if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                         crypto_free_skcipher(ic->journal_crypt);
                         ic->journal_crypt = NULL;
                 } else {
-                        SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                        unsigned char iv[ivsize];
                         unsigned crypt_len = roundup(ivsize, blocksize);
+                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                        if (!req) {
+                                *error = "Could not allocate crypt request";
+                                r = -ENOMEM;
+                                goto bad;
+                        }
+                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                        if (!crypt_iv) {
+                                *error = "Could not allocate iv";
+                                r = -ENOMEM;
+                                goto bad;
+                        }
                         crypt_data = kmalloc(crypt_len, GFP_KERNEL);
                         if (!crypt_data) {
                                 *error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                 goto bad;
                         }
-                        skcipher_request_set_tfm(req, ic->journal_crypt);
                         ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
                         if (!ic->journal_scatterlist) {
                                 *error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                 struct skcipher_request *section_req;
                                 __u32 section_le = cpu_to_le32(i);
-                                memset(iv, 0x00, ivsize);
+                                memset(crypt_iv, 0x00, ivsize);
                                 memset(crypt_data, 0x00, crypt_len);
                                 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
                                 sg_init_one(&sg, crypt_data, crypt_len);
-                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
                                 init_completion(&comp.comp);
                                 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                                 if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
         }
 bad:
         kfree(crypt_data);
+        kfree(crypt_iv);
+        skcipher_request_free(req);
         return r;
 }
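
The pattern behind the dm-integrity change: an async skcipher implementation may queue the request and DMA the IV from wherever the pointer says it lives, so neither a request declared with SKCIPHER_REQUEST_ON_STACK nor a stack VLA IV is safe; both have to come from the heap and be released on every exit path. The following is a hedged kernel-style sketch of that shape, a toy helper rather than the create_journal() code, assuming a synchronous wait via the crypto_wait_req() helpers available since 4.14.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int toy_encrypt_buffer(struct crypto_skcipher *tfm,
                              void *buf, unsigned int len)
{
        DECLARE_CRYPTO_WAIT(wait);
        struct skcipher_request *req;
        struct scatterlist sg;
        unsigned char *iv;
        int r;

        /* Heap-allocated request: safe to hand to async/DMA-backed drivers. */
        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        /* Heap-allocated IV for the same reason; a stack VLA is not. */
        iv = kzalloc(crypto_skcipher_ivsize(tfm), GFP_KERNEL);
        if (!iv) {
                skcipher_request_free(req);
                return -ENOMEM;
        }

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        sg_init_one(&sg, buf, len);   /* buf must not be stack memory either */
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        kfree(iv);
        skcipher_request_free(req);
        return r;
}

create_journal() itself waits on a completion through do_crypt() rather than crypto_wait_req(), but the allocation discipline is the same; note how the hunks initialize crypt_iv and req to NULL so the single bad: path can free them unconditionally.
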

drivers/md/dm-thin-metadata.c

@@ -80,10 +80,14 @@
 #define SECTOR_TO_BLOCK_SHIFT 3
 /*
+ * For btree insert:
  *  3 for btree insert +
  *  2 for btree lookup used within space map
+ * For btree remove:
+ *  2 for shadow spine +
+ *  4 for rebalance 3 child node
  */
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
 /* This should be plenty */
 #define SPACE_MAP_ROOT_SIZE 128
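
The expanded comment is worst-case accounting: an insert can hold 3 + 2 = 5 concurrent metadata block locks, while a removal can hold 2 + 4 = 6, so the previous limit of 5 was enough for inserts but could deadlock a removal that legitimately needs a sixth lock. Spelled out as a small sketch with assumed macro names (they are not in the kernel source):

/* Locks potentially held at once on each path, per the comment above. */
#define BTREE_INSERT_LOCKS  (3 /* btree insert */ + 2 /* space-map lookup */)       /* = 5 */
#define BTREE_REMOVE_LOCKS  (2 /* shadow spine */ + 4 /* rebalancing 3 children */) /* = 6 */

/* The limit must cover the deeper of the two paths, hence the bump to 6. */
#define TOY_MAX_CONCURRENT_LOCKS \
        (BTREE_INSERT_LOCKS > BTREE_REMOVE_LOCKS ? BTREE_INSERT_LOCKS : BTREE_REMOVE_LOCKS)
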

drivers/md/persistent-data/dm-btree.c

@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
         pn->keys[1] = rn->keys[0];
         memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
-        /*
-         * rejig the spine. This is ugly, since it knows too
-         * much about the spine
-         */
-        if (s->nodes[0] != new_parent) {
-                unlock_block(s->info, s->nodes[0]);
-                s->nodes[0] = new_parent;
-        }
-        if (key < le64_to_cpu(rn->keys[0])) {
-                unlock_block(s->info, right);
-                s->nodes[1] = left;
-        } else {
-                unlock_block(s->info, left);
-                s->nodes[1] = right;
-        }
-        s->count = 2;
+        unlock_block(s->info, left);
+        unlock_block(s->info, right);
         return 0;
 }
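
The btree_split_beneath() change removes the spine "rejig" entirely: after splitting a full root into two children, the function now unlocks both children and returns, and the insert carries on from the parent node it still holds, rather than this helper guessing which child the key lands in and handing that child back locked. A toy sketch of the resulting lock discipline, with pthread mutexes standing in for the dm block-manager locks (not the real shadow-spine code):

#include <pthread.h>

struct toy_node {
        pthread_mutex_t lock;
        /* keys and values elided in this sketch */
};

/*
 * Split helper in the spirit of the fix: both freshly created children are
 * unlocked unconditionally before returning, so the caller restarts its
 * descent from the still-locked parent and takes whichever child it
 * actually needs. No child lock is handed back to the caller.
 */
static int toy_split_beneath(struct toy_node *parent,
                             struct toy_node *left, struct toy_node *right)
{
        (void)parent;                 /* the parent stays locked by the caller */
        /* ... redistribute the parent's entries between left and right ... */
        pthread_mutex_unlock(&left->lock);
        pthread_mutex_unlock(&right->lock);
        return 0;
}
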