block-6.6-2023-10-20
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmUyYnAQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpuidEADVVSUmBSEqEfHtOe0CYqTnXuBvdiBoIgTM
uHlhdGRdRdUIO4vDiMMJYdNC2LjJTu1aLf6xJ7WK5wRcb5lxdu/TkcKZ36wD42pw
FcgCG77VJm2hzhD/4tsP1o5UGYU+O/J344AHQcdAcVVUwA18ddwO9vgYw73AJWQn
e5qXSKqcO724WNyeXv8AMqmfowzwYBMTWFO1i345uiXYQYAHDvLK/PTgLjUTkV/t
sNCSH+7+Mxg6ucgsOQbA2LU4GLet34sJy6+JhynpHEpE4mqaWQYTzzDPv5PJuhpg
MYqVTkJ/UQNPAv8h+aDNUDzAWGxqcsKvLkmMFmwFUpyZZeM/pAsulCy1C0S1oa75
+OAsqlVfevP7G00HLko0g5iphDHAKJP1+jzEt1Qcf4OGYzHfTXhquI/EyEDL4w84
EVmW54KjR4VplXW0EQAAam4KNwhwIgLFDhPT0nzmpvIZgtJLAjcKBxxflVJo/iSJ
76Zb0gBjvHIB3iUlTKgWkmWraAGpiP469l9Sj5ncZfkLkCprRmRo9gjJW99AET18
J/aDBrr/7dMnGSMwyHWDe983Zel2j20XacEo+zmQS+MECy29UHKgzIL9FG8zmrl+
5gLEjsVUrfkZnZpUUTa57/Lp4t2B03JThoYIpiLPwGw53/iWmXeZZ1K6giJ6Is7A
wiD2QfI00Q==
=S5V+
-----END PGP SIGNATURE-----

Merge tag 'block-6.6-2023-10-20' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "A fix for a regression with sed-opal and saved keys, and outside of
  that an NVMe pull request fixing a few minor issues on that front"

* tag 'block-6.6-2023-10-20' of git://git.kernel.dk/linux:
  nvme-pci: add BOGUS_NID for Intel 0a54 device
  nvmet-auth: complete a request only after freeing the dhchap pointers
  nvme: sanitize metadata bounce buffer for reads
  block: Fix regression in sed-opal for a saved key.
  nvme-auth: use chap->s2 to indicate bidirectional authentication
  nvmet-tcp: Fix a possible UAF in queue intialization setup
  nvme-rdma: do not try to stop unallocated queues
commit c320008102
block/sed-opal.c:
@@ -2888,11 +2888,10 @@ static int opal_lock_unlock(struct opal_dev *dev,
         if (lk_unlk->session.who > OPAL_USER9)
                 return -EINVAL;
 
-        ret = opal_get_key(dev, &lk_unlk->session.opal_key);
-        if (ret)
-                return ret;
         mutex_lock(&dev->dev_lock);
         opal_lock_check_for_saved_key(dev, lk_unlk);
-        ret = __opal_lock_unlock(dev, lk_unlk);
+        ret = opal_get_key(dev, &lk_unlk->session.opal_key);
+        if (!ret)
+                ret = __opal_lock_unlock(dev, lk_unlk);
         mutex_unlock(&dev->dev_lock);
 
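The sed-opal hunk reorders key lookup so that a key saved by an earlier unlock (restored by opal_lock_check_for_saved_key() under the device lock) is considered before opal_get_key() runs, and a lookup failure no longer aborts before that restore can happen. Below is a minimal userspace sketch of that ordering; all names and types are hypothetical stand-ins, not the kernel's.

#include <pthread.h>
#include <string.h>

struct key { char data[32]; size_t len; };

struct dev {
        pthread_mutex_t lock;
        struct key saved_key;          /* remembered from a previous unlock */
        int has_saved_key;
};

/* Hypothetical stand-in for opal_get_key(): succeed only if a key is present. */
static int get_key(struct dev *d, struct key *k)
{
        return k->len ? 0 : -1;
}

static int do_lock_unlock(struct dev *d, struct key *k)
{
        return 0;                      /* the actual device operation would go here */
}

/* Fixed ordering: take the lock, restore any saved key, then resolve and use it. */
static int lock_unlock(struct dev *d, struct key *k)
{
        int ret;

        pthread_mutex_lock(&d->lock);
        if (d->has_saved_key && k->len == 0)
                *k = d->saved_key;     /* analogue of opal_lock_check_for_saved_key() */
        ret = get_key(d, k);
        if (!ret)
                ret = do_lock_unlock(d, k);
        pthread_mutex_unlock(&d->lock);
        return ret;
}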
drivers/nvme/host/auth.c:
@@ -341,7 +341,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
         struct nvmf_auth_dhchap_success1_data *data = chap->buf;
         size_t size = sizeof(*data);
 
-        if (chap->ctrl_key)
+        if (chap->s2)
                 size += chap->hash_len;
 
         if (size > CHAP_BUF_SIZE) {
@@ -825,7 +825,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
                 goto fail2;
         }
 
-        if (chap->ctrl_key) {
+        if (chap->s2) {
                 /* DH-HMAC-CHAP Step 5: send success2 */
                 dev_dbg(ctrl->device, "%s: qid %d send success2\n",
                         __func__, chap->qid);
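Both auth.c hunks switch the bidirectional-authentication test from chap->ctrl_key to chap->s2: s2 is the sequence number the host actually sent when it asked the controller to authenticate itself, so it is zero whenever only unidirectional authentication was negotiated, even if a controller key happens to be configured. A rough sketch of that decision with hypothetical types (not the driver's structures):

#include <stdbool.h>
#include <stddef.h>

struct chap_ctx {
        unsigned int s2;   /* non-zero only if bidirectional auth was requested on the wire */
        void *ctrl_key;    /* may exist even when bidirectional auth was not negotiated */
        size_t hash_len;
};

/* success1 carries a response hash only when the controller must authenticate. */
static size_t success1_size(const struct chap_ctx *chap, size_t base)
{
        return chap->s2 ? base + chap->hash_len : base;
}

/* DH-HMAC-CHAP step 5 (success2) is sent only for bidirectional authentication. */
static bool need_success2(const struct chap_ctx *chap)
{
        return chap->s2 != 0;
}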
drivers/nvme/host/ioctl.c:
@@ -108,9 +108,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
         if (!buf)
                 goto out;
 
-        ret = -EFAULT;
-        if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
-                goto out_free_meta;
+        if (req_op(req) == REQ_OP_DRV_OUT) {
+                ret = -EFAULT;
+                if (copy_from_user(buf, ubuf, len))
+                        goto out_free_meta;
+        } else {
+                memset(buf, 0, len);
+        }
 
         bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
         if (IS_ERR(bip)) {
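The ioctl change zeroes the metadata bounce buffer for read (device-to-host) commands instead of leaving it uninitialized, so stale kernel memory can never be copied back to userspace; only write (driver-out) commands copy caller data in. A small userspace analogue, with hypothetical names:

#include <stdlib.h>
#include <string.h>

enum xfer_dir { DIR_OUT, DIR_IN };   /* OUT: caller -> device, IN: device -> caller */

/* Allocate a metadata bounce buffer of 'len' bytes for the given direction. */
static void *alloc_meta_bounce(enum xfer_dir dir, const void *caller_buf, size_t len)
{
        void *buf = malloc(len);

        if (!buf)
                return NULL;
        if (dir == DIR_OUT) {
                memcpy(buf, caller_buf, len);   /* stands in for copy_from_user() */
        } else {
                /* Read path: sanitize the buffer so uninitialized memory is
                 * never handed back to the caller. */
                memset(buf, 0, len);
        }
        return buf;
}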
drivers/nvme/host/pci.c:
@@ -3329,7 +3329,8 @@ static const struct pci_device_id nvme_id_table[] = {
         { PCI_VDEVICE(INTEL, 0x0a54),   /* Intel P4500/P4600 */
                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
                                 NVME_QUIRK_DEALLOCATE_ZEROES |
-                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+                                NVME_QUIRK_IGNORE_DEV_SUBNQN |
+                                NVME_QUIRK_BOGUS_NID, },
         { PCI_VDEVICE(INTEL, 0x0a55),   /* Dell Express Flash P4600 */
                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
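Each quirk is a bit OR-ed into the entry's .driver_data and later tested as a mask by the core; NVME_QUIRK_BOGUS_NID tells the driver not to trust the namespace identifiers this controller reports. A minimal sketch of the flag-mask idea, with hypothetical bit values rather than the driver's own definitions:

#include <stdbool.h>

/* Hypothetical bit assignments; the driver defines its own NVME_QUIRK_* enum. */
#define QUIRK_STRIPE_SIZE        (1u << 0)
#define QUIRK_DEALLOCATE_ZEROES  (1u << 1)
#define QUIRK_IGNORE_DEV_SUBNQN  (1u << 2)
#define QUIRK_BOGUS_NID          (1u << 3)

struct ctrl { unsigned int quirks; };

/* With this change, the 0x0a54 entry would carry all four bits. */
static const unsigned int intel_0a54_quirks =
        QUIRK_STRIPE_SIZE | QUIRK_DEALLOCATE_ZEROES |
        QUIRK_IGNORE_DEV_SUBNQN | QUIRK_BOGUS_NID;

static bool ignore_reported_nids(const struct ctrl *c)
{
        return (c->quirks & QUIRK_BOGUS_NID) != 0;
}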
drivers/nvme/host/rdma.c:
@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
+        if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+                return;
+
         mutex_lock(&queue->queue_lock);
         if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
                 __nvme_rdma_stop_queue(queue);
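The new guard simply refuses to touch a queue that was never allocated, since queue_lock and the rest of its state are only initialized on the allocation path. A tiny sketch of the same early-return pattern, with hypothetical types:

#include <pthread.h>
#include <stdbool.h>

struct queue {
        bool allocated;    /* set once the queue and its lock are initialized */
        bool live;
        pthread_mutex_t lock;
};

static void stop_queue(struct queue *q)
{
        if (!q->allocated)
                return;        /* never set up: the lock below would be invalid */

        pthread_mutex_lock(&q->lock);
        if (q->live) {
                q->live = false;
                /* ... tear down the transport resources here ... */
        }
        pthread_mutex_unlock(&q->lock);
}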
drivers/nvme/target/fabrics-cmd-auth.c:
@@ -333,19 +333,21 @@ done:
                          __func__, ctrl->cntlid, req->sq->qid,
                          status, req->error_loc);
         req->cqe->result.u64 = 0;
-        nvmet_req_complete(req, status);
         if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
             req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
                 unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
 
                 mod_delayed_work(system_wq, &req->sq->auth_expired_work,
                                  auth_expire_secs * HZ);
-                return;
+                goto complete;
         }
         /* Final states, clear up variables */
         nvmet_auth_sq_free(req->sq);
         if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
                 nvmet_ctrl_fatal_error(ctrl);
+
+complete:
+        nvmet_req_complete(req, status);
 }
 
 static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
@@ -514,11 +516,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
         kfree(d);
 done:
         req->cqe->result.u64 = 0;
-        nvmet_req_complete(req, status);
+
         if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
                 nvmet_auth_sq_free(req->sq);
         else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
                 nvmet_auth_sq_free(req->sq);
                 nvmet_ctrl_fatal_error(ctrl);
         }
+        nvmet_req_complete(req, status);
 }
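Both target-auth hunks move nvmet_req_complete() to the very end of the function: completing the request may free it or hand it back for reuse, so per-request authentication state (released via nvmet_auth_sq_free()) has to be freed before the completion, not after. A generic userspace sketch of that "complete last" rule, with hypothetical names:

#include <stdlib.h>

struct request {
        void *auth_buf;                           /* per-request resource */
        void (*complete)(struct request *, int);  /* may free or recycle the request */
};

static void free_auth_state(struct request *req)
{
        free(req->auth_buf);
        req->auth_buf = NULL;
}

static void finish_auth(struct request *req, int status)
{
        /* Release everything owned by this request first ... */
        free_auth_state(req);
        /* ... then complete it; 'req' must not be touched after this call. */
        req->complete(req, status);
}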
drivers/nvme/target/tcp.c:
@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
 {
+        queue->rcv_state = NVMET_TCP_RECV_ERR;
         if (status == -EPIPE || status == -ECONNRESET)
                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
         else
@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
         iov.iov_len = sizeof(*icresp);
         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
         if (ret < 0)
-                goto free_crypto;
+                return ret; /* queue removal will cleanup */
 
         queue->state = NVMET_TCP_Q_LIVE;
         nvmet_prepare_receive_pdu(queue);
         return 0;
-free_crypto:
-        if (queue->hdr_digest || queue->data_digest)
-                nvmet_tcp_free_crypto(queue);
-        return ret;
 }
 
 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
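The two tcp.c hunks close a possible use-after-free during queue initialization: the socket-error path now marks the receive state as errored before shutting the socket down, and an ICReq send failure no longer frees the digest contexts inline but returns the error so the normal queue-removal path releases them exactly once. A small sketch of the single-owner cleanup idea, with hypothetical names:

#include <stdlib.h>

struct tcp_queue {
        void *crypto;      /* digest state, owned by the queue */
        int rcv_error;     /* once set, the receive loop stops processing */
};

static int send_icresp(struct tcp_queue *q)
{
        (void)q;
        return -1;         /* pretend the send failed */
}

/* On error, do NOT free q->crypto here: report it and let teardown clean up. */
static int handle_icreq(struct tcp_queue *q)
{
        int ret = send_icresp(q);

        if (ret < 0)
                return ret;            /* queue removal will clean up q->crypto */
        return 0;
}

/* Single owner of the cleanup, called exactly once on queue teardown. */
static void destroy_queue(struct tcp_queue *q)
{
        free(q->crypto);
        q->crypto = NULL;
}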