Fixes for some recent regressions including fallout from the vmalloc'd
stack change (after which we can no longer encrypt stuff on the stack).
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJYHNwpAAoJECebzXlCjuG++DMP/3mUUAF09DfFR/EHl7knDT1f
 kZ53UVHYzr02w0wXfwxVLlp2H7TdSAufgsSvPT6qksA3eY7gL6nJ9zHkl+Nv5yCx
 y6vsFWjO1QEUWFOZWCKcmT2dAI3Ddt9IhK13pfZEKN1XKvK2zWB16HEVzSg6fR2K
 NwHlpMnQUI4HWThURzwTZb1M5YhxRCAnyiv8BTAAPjbEfzPzdL7j3jxwqtH8bOWp
 qIcDDvjC744b9zy0YuAEY/NyGBhYZPdM6gWsBBes1TRzBWUL9qsUYTWDJTmg/F1l
 Or0Jz7CUEN9uOHLGnkATPDc+eBg9YFV+bSsSnJu1/W4Er7dX1Af/lol79zEp/Zw1
 Snd9FelSPj3vxmYAFTCLnHRTRgsyiDhbbb7gVrzH9bxnCrRNR6p2kY018s1Cl9Td
 uWQoNNFQwwnYxWYEeZdO5PgX+pcgoCzhHACNk5oA93YaBE0GuLHHugwwIrYE8TM1
 1iY20sLC5lJcnPqxdgnoprZnnHMuL6rx5KRbvBeflNZ4huK2PIcPJyeB83XH6s12
 G67PjJ0rfWzSBF14O/ZtQA6he+kXvnH3pKqpNnaMiBxZZ2J8E1eQvrKTLLIwmtlP
 18KKJpZIzh7jTTZ/99nAMAt/BGw97P9TToLdnI8dCxYygHEaywpEYtcsE8IWFAvA
 3XkS5QdlJhhAaAUUYBXy
 =oPbZ
 -----END PGP SIGNATURE-----

Merge tag 'nfsd-4.9-1' of git://linux-nfs.org/~bfields/linux

Pull nfsd bugfixes from Bruce Fields:
 "Fixes for some recent regressions including fallout from the vmalloc'd
  stack change (after which we can no longer encrypt stuff on the
  stack)"

* tag 'nfsd-4.9-1' of git://linux-nfs.org/~bfields/linux:
  nfsd: Fix general protection fault in release_lock_stateid()
  svcrdma: backchannel cannot share a page for send and rcv buffers
  sunrpc: fix some missing rq_rbuffer assignments
  sunrpc: don't pass on-stack memory to sg_set_buf
  nfsd: move blocked lock handling under a dedicated spinlock
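For context on the vmalloc'd-stack fallout: sg_init_one()/sg_set_buf() resolve the buffer address via virt_to_page(), which is only valid for directly mapped memory, so once CONFIG_VMAP_STACK puts kernel stacks in vmalloc space an on-stack buffer can no longer be handed to the crypto scatterlist API. Below is a minimal sketch of the broken and fixed patterns; it is illustrative only — demo_checksum() and its buffers are invented, not taken from these patches.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

/*
 * Illustrative sketch (not from the patches below).  sg_init_one()
 * ends up calling virt_to_page() on the buffer address, which only
 * works for directly mapped (e.g. kmalloc'd) memory.
 */
static int demo_checksum(void)
{
	struct scatterlist sg;
	u8 stackbuf[16];
	u8 *heapbuf;

	/* Old pattern: breaks once CONFIG_VMAP_STACK makes this a vmalloc address */
	sg_init_one(&sg, stackbuf, sizeof(stackbuf));	/* BAD */

	/* Pattern these fixes switch to: keep the buffer off the stack */
	heapbuf = kmalloc(16, GFP_NOFS);
	if (!heapbuf)
		return -ENOMEM;
	sg_init_one(&sg, heapbuf, 16);			/* OK: kmalloc'd memory */
	/* ... hand &sg to the crypto/ahash layer here ... */
	kfree(heapbuf);
	return 0;
}

The auth_gss and gss_krb5 hunks below apply this same conversion: on-stack buffers such as checksumdata[], data[], seq and xdr_seq become kmalloc() allocations that are kfree()d on both the success and error paths.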
Linus Torvalds 2016-11-04 20:12:10 -07:00
commit fb415f222c
7 changed files with 107 additions and 67 deletions

View File

@@ -84,6 +84,8 @@ struct nfsd_net {
 	struct list_head client_lru;
 	struct list_head close_lru;
 	struct list_head del_recall_lru;
+
+	/* protected by blocked_locks_lock */
 	struct list_head blocked_locks_lru;

 	struct delayed_work laundromat_work;
@@ -91,6 +93,9 @@ struct nfsd_net {
 	/* client_lock protects the client lru list and session hash table */
 	spinlock_t client_lock;

+	/* protects blocked_locks_lru */
+	spinlock_t blocked_locks_lock;
+
 	struct file *rec_file;
 	bool in_grace;
 	const struct nfsd4_client_tracking_ops *client_tracking_ops;

View File

@@ -217,7 +217,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 {
 	struct nfsd4_blocked_lock *cur, *found = NULL;

-	spin_lock(&nn->client_lock);
+	spin_lock(&nn->blocked_locks_lock);
 	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
 		if (fh_match(fh, &cur->nbl_fh)) {
 			list_del_init(&cur->nbl_list);
@@ -226,7 +226,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 			break;
 		}
 	}
-	spin_unlock(&nn->client_lock);
+	spin_unlock(&nn->blocked_locks_lock);
 	if (found)
 		posix_unblock_lock(&found->nbl_lock);
 	return found;
@@ -1227,9 +1227,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
-
-	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

 	list_del_init(&stp->st_locks);
 	nfs4_unhash_stid(&stp->st_stid);
@@ -1238,12 +1236,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+	struct nfs4_client *clp = stp->st_stid.sc_client;
 	bool unhashed;

-	spin_lock(&oo->oo_owner.so_client->cl_lock);
+	spin_lock(&clp->cl_lock);
 	unhashed = unhash_lock_stateid(stp);
-	spin_unlock(&oo->oo_owner.so_client->cl_lock);
+	spin_unlock(&clp->cl_lock);
 	if (unhashed)
 		nfs4_put_stid(&stp->st_stid);
 }
@@ -4665,7 +4663,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 	 * indefinitely once the lock does become free.
 	 */
 	BUG_ON(!list_empty(&reaplist));
-	spin_lock(&nn->client_lock);
+	spin_lock(&nn->blocked_locks_lock);
 	while (!list_empty(&nn->blocked_locks_lru)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,
 					struct nfsd4_blocked_lock, nbl_lru);
@@ -4678,7 +4676,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 		list_move(&nbl->nbl_lru, &reaplist);
 		list_del_init(&nbl->nbl_list);
 	}
-	spin_unlock(&nn->client_lock);
+	spin_unlock(&nn->blocked_locks_lock);

 	while (!list_empty(&reaplist)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,
@@ -5439,13 +5437,13 @@ nfsd4_lm_notify(struct file_lock *fl)
 	bool queue = false;

 	/* An empty list means that something else is going to be using it */
-	spin_lock(&nn->client_lock);
+	spin_lock(&nn->blocked_locks_lock);
 	if (!list_empty(&nbl->nbl_list)) {
 		list_del_init(&nbl->nbl_list);
 		list_del_init(&nbl->nbl_lru);
 		queue = true;
 	}
-	spin_unlock(&nn->client_lock);
+	spin_unlock(&nn->blocked_locks_lock);

 	if (queue)
 		nfsd4_run_cb(&nbl->nbl_cb);
@@ -5868,10 +5866,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (fl_flags & FL_SLEEP) {
 		nbl->nbl_time = jiffies;
-		spin_lock(&nn->client_lock);
+		spin_lock(&nn->blocked_locks_lock);
 		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
 		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
-		spin_unlock(&nn->client_lock);
+		spin_unlock(&nn->blocked_locks_lock);
 	}

 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
@@ -5900,10 +5898,10 @@ out:
 	if (nbl) {
 		/* dequeue it if we queued it before */
 		if (fl_flags & FL_SLEEP) {
-			spin_lock(&nn->client_lock);
+			spin_lock(&nn->blocked_locks_lock);
 			list_del_init(&nbl->nbl_list);
 			list_del_init(&nbl->nbl_lru);
-			spin_unlock(&nn->client_lock);
+			spin_unlock(&nn->blocked_locks_lock);
 		}
 		free_blocked_lock(nbl);
 	}
@@ -6943,9 +6941,11 @@ static int nfs4_state_create_net(struct net *net)
 	INIT_LIST_HEAD(&nn->client_lru);
 	INIT_LIST_HEAD(&nn->close_lru);
 	INIT_LIST_HEAD(&nn->del_recall_lru);
-	INIT_LIST_HEAD(&nn->blocked_locks_lru);
 	spin_lock_init(&nn->client_lock);
+	spin_lock_init(&nn->blocked_locks_lock);
+	INIT_LIST_HEAD(&nn->blocked_locks_lru);

 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
 	get_net(net);
@@ -7063,14 +7063,14 @@ nfs4_state_shutdown_net(struct net *net)
 	}
 	BUG_ON(!list_empty(&reaplist));

-	spin_lock(&nn->client_lock);
+	spin_lock(&nn->blocked_locks_lock);
 	while (!list_empty(&nn->blocked_locks_lru)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_move(&nbl->nbl_lru, &reaplist);
 		list_del_init(&nbl->nbl_list);
 	}
-	spin_unlock(&nn->client_lock);
+	spin_unlock(&nn->blocked_locks_lock);

 	while (!list_empty(&reaplist)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,

View File

@@ -1616,7 +1616,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
-	__be32		seq;
+	__be32		*seq = NULL;
 	struct kvec	iov;
 	struct xdr_buf	verf_buf;
 	struct xdr_netobj mic;
@@ -1631,9 +1631,12 @@ gss_validate(struct rpc_task *task, __be32 *p)
 		goto out_bad;
 	if (flav != RPC_AUTH_GSS)
 		goto out_bad;
-	seq = htonl(task->tk_rqstp->rq_seqno);
-	iov.iov_base = &seq;
-	iov.iov_len = sizeof(seq);
+	seq = kmalloc(4, GFP_NOFS);
+	if (!seq)
+		goto out_bad;
+	*seq = htonl(task->tk_rqstp->rq_seqno);
+	iov.iov_base = seq;
+	iov.iov_len = 4;
 	xdr_buf_from_iov(&iov, &verf_buf);
 	mic.data = (u8 *)p;
 	mic.len = len;
@@ -1653,11 +1656,13 @@ gss_validate(struct rpc_task *task, __be32 *p)
 	gss_put_ctx(ctx);
 	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
 			task->tk_pid, __func__);
+	kfree(seq);
 	return p + XDR_QUADLEN(len);
 out_bad:
 	gss_put_ctx(ctx);
 	dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
 		PTR_ERR(ret));
+	kfree(seq);
 	return ret;
 }

View File

@@ -166,8 +166,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 		       unsigned int usage, struct xdr_netobj *cksumout)
 {
 	struct scatterlist              sg[1];
-	int err;
-	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+	int err = -1;
+	u8 *checksumdata;
 	u8 rc4salt[4];
 	struct crypto_ahash *md5;
 	struct crypto_ahash *hmac_md5;
@@ -187,23 +187,22 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 		return GSS_S_FAILURE;
 	}

+	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+	if (!checksumdata)
+		return GSS_S_FAILURE;
+
 	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(md5))
-		return GSS_S_FAILURE;
+		goto out_free_cksum;

 	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
 				      CRYPTO_ALG_ASYNC);
-	if (IS_ERR(hmac_md5)) {
-		crypto_free_ahash(md5);
-		return GSS_S_FAILURE;
-	}
+	if (IS_ERR(hmac_md5))
+		goto out_free_md5;

 	req = ahash_request_alloc(md5, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ahash(hmac_md5);
-		crypto_free_ahash(md5);
-		return GSS_S_FAILURE;
-	}
+	if (!req)
+		goto out_free_hmac_md5;

 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -232,11 +231,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 	ahash_request_free(req);

 	req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ahash(hmac_md5);
-		crypto_free_ahash(md5);
-		return GSS_S_FAILURE;
-	}
+	if (!req)
+		goto out_free_hmac_md5;

 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -258,8 +254,12 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 	cksumout->len = kctx->gk5e->cksumlength;
 out:
 	ahash_request_free(req);
-	crypto_free_ahash(md5);
+out_free_hmac_md5:
 	crypto_free_ahash(hmac_md5);
+out_free_md5:
+	crypto_free_ahash(md5);
+out_free_cksum:
+	kfree(checksumdata);
 	return err ? GSS_S_FAILURE : 0;
 }
@@ -276,8 +276,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
 	struct crypto_ahash *tfm;
 	struct ahash_request *req;
 	struct scatterlist              sg[1];
-	int err;
-	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+	int err = -1;
+	u8 *checksumdata;
 	unsigned int checksumlen;

 	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
@@ -291,15 +291,17 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
 		return GSS_S_FAILURE;
 	}

-	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
+	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+	if (checksumdata == NULL)
 		return GSS_S_FAILURE;

+	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		goto out_free_cksum;
+
 	req = ahash_request_alloc(tfm, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ahash(tfm);
-		return GSS_S_FAILURE;
-	}
+	if (!req)
+		goto out_free_ahash;

 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -349,7 +351,10 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
 	cksumout->len = kctx->gk5e->cksumlength;
 out:
 	ahash_request_free(req);
+out_free_ahash:
 	crypto_free_ahash(tfm);
+out_free_cksum:
+	kfree(checksumdata);
 	return err ? GSS_S_FAILURE : 0;
 }
@@ -368,8 +373,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
 	struct crypto_ahash *tfm;
 	struct ahash_request *req;
 	struct scatterlist sg[1];
-	int err;
-	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+	int err = -1;
+	u8 *checksumdata;
 	unsigned int checksumlen;

 	if (kctx->gk5e->keyed_cksum == 0) {
@@ -383,16 +388,18 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
 		return GSS_S_FAILURE;
 	}

+	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+	if (!checksumdata)
+		return GSS_S_FAILURE;
+
 	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm))
-		return GSS_S_FAILURE;
+		goto out_free_cksum;

 	checksumlen = crypto_ahash_digestsize(tfm);

 	req = ahash_request_alloc(tfm, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ahash(tfm);
-		return GSS_S_FAILURE;
-	}
+	if (!req)
+		goto out_free_ahash;

 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -433,7 +440,10 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
 	}
 out:
 	ahash_request_free(req);
+out_free_ahash:
 	crypto_free_ahash(tfm);
+out_free_cksum:
+	kfree(checksumdata);
 	return err ? GSS_S_FAILURE : 0;
 }
@@ -666,14 +676,17 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
 	u32 ret;
 	struct scatterlist sg[1];
 	SKCIPHER_REQUEST_ON_STACK(req, cipher);
-	u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
+	u8 *data;
 	struct page **save_pages;
 	u32 len = buf->len - offset;

-	if (len > ARRAY_SIZE(data)) {
+	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
 		WARN_ON(0);
 		return -ENOMEM;
 	}
+	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
+	if (!data)
+		return -ENOMEM;

 	/*
 	 * For encryption, we want to read from the cleartext
@@ -708,6 +721,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
 	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

 out:
+	kfree(data);
 	return ret;
 }

View File

@@ -718,30 +718,37 @@ gss_write_null_verf(struct svc_rqst *rqstp)
 static int
 gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
 {
-	__be32			xdr_seq;
+	__be32			*xdr_seq;
 	u32			maj_stat;
 	struct xdr_buf		verf_data;
 	struct xdr_netobj	mic;
 	__be32			*p;
 	struct kvec		iov;
+	int err = -1;

 	svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
-	xdr_seq = htonl(seq);
+	xdr_seq = kmalloc(4, GFP_KERNEL);
+	if (!xdr_seq)
+		return -1;
+	*xdr_seq = htonl(seq);

-	iov.iov_base = &xdr_seq;
-	iov.iov_len = sizeof(xdr_seq);
+	iov.iov_base = xdr_seq;
+	iov.iov_len = 4;
 	xdr_buf_from_iov(&iov, &verf_data);
 	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
 	mic.data = (u8 *)(p + 1);
 	maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
 	if (maj_stat != GSS_S_COMPLETE)
-		return -1;
+		goto out;
 	*p++ = htonl(mic.len);
 	memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
 	p += XDR_QUADLEN(mic.len);
 	if (!xdr_ressize_check(rqstp, p))
-		return -1;
-	return 0;
+		goto out;
+	err = 0;
+out:
+	kfree(xdr_seq);
+	return err;
 }

 struct gss_domain {

View File

@@ -177,18 +177,26 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
 		return -EINVAL;
 	}

+	/* svc_rdma_sendto releases this page */
 	page = alloc_page(RPCRDMA_DEF_GFP);
 	if (!page)
 		return -ENOMEM;
-
 	rqst->rq_buffer = page_address(page);
+
+	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
+	if (!rqst->rq_rbuffer) {
+		put_page(page);
+		return -ENOMEM;
+	}
 	return 0;
 }

 static void
 xprt_rdma_bc_free(struct rpc_task *task)
 {
-	/* No-op: ctxt and page have already been freed. */
+	struct rpc_rqst *rqst = task->tk_rqstp;
+
+	kfree(rqst->rq_rbuffer);
 }

 static int

View File

@@ -2563,6 +2563,7 @@ static int bc_malloc(struct rpc_task *task)
 	buf->len = PAGE_SIZE;

 	rqst->rq_buffer = buf->data;
+	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
 	return 0;
 }