for-linus-20181201
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAlwC1c4QHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgppxmD/4pqn8REEh/QUXWhCJbOXLLLxfQju7Uxs/v
j2Bc6W/e7Z9jvKAs06IIhaV6SxBrM0oUebf/hJY0E/kTSHiNPJqx/X3W9hFYOo+p
EJau3vavOrxVzgq5zt8S/i//HeanT+H37nE9WDqSRKXTta8JFDw+DoysepILTUvN
WGDjuplPcurwmf2W1qES+5vNy/Jpln9ErNuqPBSjc6shozQ8WAzvuupVs+uZEpeK
+gqrx0pJYrtoU+pSUK+Bt6bSzzp8Z0qHGIVMAabNULbz43qblK0ILRE+qLFbFwsB
62EMMtX9b2Lsvqpoe2cQ+deQlUalsGVmpyE+7GP/evZbVmtD/NoH6cJQ/dA/tFtw
cluL3rWBJKB5OZ1yatDE2/rUYsGo5FzqMUz/tIWSf2FdZcLfhRNLka7DueSA6NQe
wtLJU9GrME67+t+PqncjDxoyQYma4oynAcc5dfqlBQv5OP7HDf4TP28g8FdkHjcy
fEXAp58516YZiCpoWZf6dPR9fUQ0A1eF+qxHnUacy5tHN4AKPrccU3+k+0WStFNf
qaOPkj4kWtv17d2DO4UoqAtBqFO16QCYSsa5+drpDeTOq9QgGqA6O+sGngN0LsxS
F7x3msgBIkgEFYFtpuMBXnamdooiZMKrzI0Ctn7PK8b5Qx1OgRNCZcTQD4uql1Fj
L6R/6Ynibg==
=lMlT
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20181201' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:

 - Single range elevator discard merge fix, that caused crashes (Ming)

 - Fix for a regression in O_DIRECT, where we could potentially lose
   the error value (Maximilian Heyne)

 - NVMe pull request from Christoph, with little fixes all over the map
   for NVMe.

* tag 'for-linus-20181201' of git://git.kernel.dk/linux-block:
  block: fix single range discard merge
  nvme-rdma: fix double freeing of async event data
  nvme: flush namespace scanning work just before removing namespaces
  nvme: warn when finding multi-port subsystems without multipathing enabled
  fs: fix lost error code in dio_complete
  nvme-pci: fix surprise removal
  nvme-fc: initialize nvme_req(rq)->ctrl after calling __nvme_fc_init_request()
  nvme: Free ctrl device name on init failure
commit 880584176e
block/blk-merge.c
@@ -820,7 +820,7 @@ static struct request *attempt_merge(struct request_queue *q,
 
 	req->__data_len += blk_rq_bytes(next);
 
-	if (req_op(req) != REQ_OP_DISCARD)
+	if (!blk_discard_mergable(req))
 		elv_merge_requests(q, req, next);
 
 	/*
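For context, the new check calls blk_discard_mergable(), whose body is not shown in this hunk. A minimal sketch of that helper, based on how include/linux/blkdev.h defined it around this kernel version (a recollection to be verified against the tree, not part of this diff):

static inline bool blk_discard_mergable(struct request *req)
{
	/* A discard counts as discard-mergeable only when the queue
	 * supports more than one discard segment (multi-range discard);
	 * single-range discards take the ordinary merge path instead. */
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}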
drivers/nvme/host/core.c
@@ -3314,6 +3314,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns, *next;
 	LIST_HEAD(ns_list);
 
+	/* prevent racing with ns scanning */
+	flush_work(&ctrl->scan_work);
+
 	/*
 	 * The dead states indicates the controller was not gracefully
 	 * disconnected. In that case, we won't be able to flush any data while
@@ -3476,7 +3479,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 	nvme_mpath_stop(ctrl);
 	nvme_stop_keep_alive(ctrl);
 	flush_work(&ctrl->async_event_work);
-	flush_work(&ctrl->scan_work);
 	cancel_work_sync(&ctrl->fw_act_work);
 	if (ctrl->ops->stop_ctrl)
 		ctrl->ops->stop_ctrl(ctrl);
@@ -3585,7 +3587,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
 	return 0;
 out_free_name:
-	kfree_const(dev->kobj.name);
+	kfree_const(ctrl->device->kobj.name);
 out_release_instance:
 	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
 out:
@@ -3607,7 +3609,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	down_read(&ctrl->namespaces_rwsem);
 
 	/* Forcibly unquiesce queues to avoid blocking dispatch */
-	if (ctrl->admin_q)
+	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 
 	list_for_each_entry(ns, &ctrl->namespaces, list)
drivers/nvme/host/fc.c
@@ -1752,12 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
 	int res;
 
-	nvme_req(rq)->ctrl = &ctrl->ctrl;
 	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
 	if (res)
 		return res;
 	op->op.fcp_req.first_sgl = &op->sgl[0];
 	op->op.fcp_req.private = &op->priv[0];
+	nvme_req(rq)->ctrl = &ctrl->ctrl;
 	return res;
 }
 
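The hunk above only moves the nvme_req(rq)->ctrl assignment below the call to __nvme_fc_init_request(). A minimal standalone sketch (hypothetical names, not the kernel code) of the general ordering bug this avoids, where a helper that memset()s the containing structure wipes out a field initialized before the call:

#include <string.h>

struct req_priv {
	void *ctrl;
	/* ... other per-request state ... */
};

/* Stand-in for a helper that zero-initializes the whole structure. */
static void init_priv(struct req_priv *p)
{
	memset(p, 0, sizeof(*p));
}

static void setup_request(struct req_priv *p, void *ctrl)
{
	/* Buggy order: p->ctrl = ctrl; init_priv(p);  -- the memset() discards it. */
	init_priv(p);
	p->ctrl = ctrl;		/* Fixed order: assign only after the helper has run. */
}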
drivers/nvme/host/nvme.h
@@ -531,6 +531,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
 		struct nvme_id_ctrl *id)
 {
+	if (ctrl->subsys->cmic & (1 << 3))
+		dev_warn(ctrl->device,
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
 	return 0;
 }
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
drivers/nvme/host/rdma.c
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
 	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
 	if (ib_dma_mapping_error(ibdev, qe->dma)) {
 		kfree(qe->data);
+		qe->data = NULL;
 		return -ENOMEM;
 	}
 
@@ -823,6 +824,7 @@ out_free_tagset:
 out_free_async_qe:
	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
		sizeof(struct nvme_command), DMA_TO_DEVICE);
+	ctrl->async_event_sqe.data = NULL;
 out_free_queue:
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 	return error;
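Both rdma.c hunks apply the same idea: clear the data pointer once the async event buffer has been released, so a later teardown path can tell it is already gone. A minimal standalone sketch of that pattern (hypothetical names, not the kernel code):

#include <stdlib.h>

struct qe {
	void *data;
};

static void free_qe(struct qe *qe)
{
	free(qe->data);
	qe->data = NULL;	/* mark the buffer as already released */
}

static void teardown(struct qe *qe)
{
	if (qe->data)		/* skip the free if an error path already did it */
		free_qe(qe);
}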
fs/direct-io.c
@@ -325,8 +325,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 		 */
 		dio->iocb->ki_pos += transferred;
 
-		if (dio->op == REQ_OP_WRITE)
-			ret = generic_write_sync(dio->iocb, transferred);
+		if (ret > 0 && dio->op == REQ_OP_WRITE)
+			ret = generic_write_sync(dio->iocb, ret);
 		dio->iocb->ki_complete(dio->iocb, ret, 0);
 	}
 
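The fs/direct-io.c change keeps an earlier error in ret from being overwritten when the write is synced. A minimal standalone sketch of that guard (hypothetical names, not the kernel code), assuming a sync step that returns the byte count it was handed:

/* Stand-in for the sync step; echoes back the byte count on success. */
static long sync_write(long nbytes)
{
	return nbytes;
}

static long complete_write(long ret)
{
	/* Old pattern: ret = sync_write(transferred); would replace a
	 * negative errno in ret with a success value and lose the error. */
	if (ret > 0)
		ret = sync_write(ret);	/* only sync writes that actually succeeded */
	return ret;
}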