Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-15 16:53:54 +08:00)
block-5.11-2020-12-23

-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl/kHXgQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpkhFEADYuiBZbYEonaV4/nqOhMZ6lXj99rEqVZui
AOMm7W8nopb97pWy0sJxZHPPMnjglubkTZbX/2TH08ndppGBQAa9HsgDIITO5Ap3
vjnew6oPsrMtxMUMqR8w8nMb4z5tfqpvUYRPd+qpvYSYLEeSh0UNAEdQ/MOGbA1t
nl8UPdNW/s1MbeyLJhU+NgGM0aZahED8KuJeVLOY2im6dySO+CoeoB0/mWrdc0PZ
SDcGBEjhmFspDVAkW4Wo8bMw7Cr72es0esvJSJyx0mlo0jSCR7ZYParDtPwuAl9H
thKo+brLibz9M+wRtW+7w37oUADTTRL+KV2xJ/J+DeAVjI7/hxgZKewh6hEORmrF
DwjbS8cnEVp70rfF7+Z9FV/+2GJXm1uI/swBi69Y42CQ33FNLZmikBW6Q3pimrDj
ZeQSCLRpGlo01xtJpsm8L71KGSfYd+njWaIFnESPhxJ+sTxd8kvFRsdjHlc4KNBG
UnMzvn6pE3WNhzJJoIqdM2uXa4XDyKkMfO7VbpUgbyij103jpbs7ruWXq3DDU5/t
Gdwa821zlq5azDYAw7PNb7FhKrsHhiKhOuxyKqJUbOUkOHBNwxJmxXcaMnR3fOSi
B+AlqYhC6A9DhkL0HG0QLcdFwYawpOsDxbFUbemt/UXn74lAjRnp8+rNMSn5tjbn
OCnk4Tpaww==
=Q42d
-----END PGP SIGNATURE-----

Merge tag 'block-5.11-2020-12-23' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few stragglers in here, but mostly just straight fixes. In
  particular:

   - Set of rnbd fixes for issues around changes for the merge window
     (Gioh, Jack, Md Haris Iqbal)

   - iocost tracepoint addition (Baolin)

   - Copyright/maintainers update (Christoph)

   - Remove old blk-mq fast path CPU warning (Daniel)

   - nbd max_part fix (Josh)

   - Remote IPI threaded IRQ fix (Sebastian)

   - dasd stable fixes (Stefan)

   - bcache merge window fixup and style fixup (Yi, Zheng)"

* tag 'block-5.11-2020-12-23' of git://git.kernel.dk/linux-block:
  md/bcache: convert comma to semicolon
  bcache:remove a superfluous check in register_bcache
  block: update some copyrights
  block: remove a pointless self-reference in block_dev.c
  MAINTAINERS: add fs/block_dev.c to the block section
  blk-mq: Don't complete on a remote CPU in force threaded mode
  s390/dasd: fix list corruption of lcu list
  s390/dasd: fix list corruption of pavgroup group list
  s390/dasd: prevent inconsistent LCU device data
  s390/dasd: fix hanging device offline processing
  blk-iocost: Add iocg idle state tracepoint
  nbd: Respect max_part for all partition scans
  block/rnbd-clt: Does not request pdu to rtrs-clt
  block/rnbd-clt: Dynamically allocate sglist for rnbd_iu
  block/rnbd: Set write-back cache and fua same to the target device
  block/rnbd: Fix typos
  block/rnbd-srv: Protect dev session sysfs removal
  block/rnbd-clt: Fix possible memleak
  block/rnbd-clt: Get rid of warning regarding size argument in strlcpy
  blk-mq: Remove 'running from the wrong CPU' warning
commit 771e7e4161
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3199,6 +3199,7 @@ S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 F:	block/
 F:	drivers/block/
+F:	fs/block_dev.c
 F:	include/linux/blk*
 F:	kernel/trace/blktrace.c
 F:	lib/sbitmap.c
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2185,6 +2185,9 @@ static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
 					WEIGHT_ONE);
 		}
 
+		TRACE_IOCG_PATH(iocg_idle, iocg, now,
+				atomic64_read(&iocg->active_period),
+				atomic64_read(&ioc->cur_period), vtime);
 		__propagate_weights(iocg, 0, 0, false, now);
 		list_del_init(&iocg->active_list);
 	}
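Note: the new trace hook goes through TRACE_IOCG_PATH() rather than calling the tracepoint directly. For context, a sketch of that helper as it appears in block/blk-iocost.c (reproduced from memory — the lock and buffer names may differ slightly): it resolves the blkcg path into a shared buffer only when the tracepoint is actually enabled, so the idle-path addition above costs essentially nothing when tracing is off.

	#define TRACE_IOCG_PATH(type, iocg, ...)				\
		do {								\
			unsigned long flags;					\
			if (trace_iocost_##type##_enabled()) {			\
				/* path lookup is slow; only do it when on */	\
				spin_lock_irqsave(&trace_iocg_path_lock, flags); \
				cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
					    trace_iocg_path, TRACE_IOCG_PATH_LEN); \
				trace_iocost_##type(iocg, trace_iocg_path,	\
						    ##__VA_ARGS__);		\
				spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
			}							\
		} while (0)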
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -650,6 +650,14 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	if (!IS_ENABLED(CONFIG_SMP) ||
 	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
 		return false;
+	/*
+	 * With force threaded interrupts enabled, raising softirq from an SMP
+	 * function call will always result in waking the ksoftirqd thread.
+	 * This is probably worse than completing the request on a different
+	 * cache domain.
+	 */
+	if (force_irqthreads)
+		return false;
 
 	/* same CPU or cache domain? Complete locally */
 	if (cpu == rq->mq_ctx->cpu ||
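Note: force_irqthreads is the global toggled by the "threadirqs" boot parameter under CONFIG_IRQ_FORCED_THREADING. A sketch of where it comes from, paraphrased from kernel/irq/manage.c (exact config guards and annotations may differ):

	#ifdef CONFIG_IRQ_FORCED_THREADING
	__read_mostly bool force_irqthreads;

	static int __init setup_forced_irqthreads(char *arg)
	{
		force_irqthreads = true;	/* "threadirqs" on the kernel cmdline */
		return 0;
	}
	early_param("threadirqs", setup_forced_irqthreads);
	#endif

With every handler forced into a thread, an IPI-raised softirq would always bounce through ksoftirqd, so the helper opts out of the remote IPI and completes the request locally instead.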
@@ -1495,31 +1503,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	int srcu_idx;
 
-	/*
-	 * We should be running this queue from one of the CPUs that
-	 * are mapped to it.
-	 *
-	 * There are at least two related races now between setting
-	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
-	 * __blk_mq_run_hw_queue():
-	 *
-	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
-	 *   but later it becomes online, then this warning is harmless
-	 *   at all
-	 *
-	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
-	 *   but later it becomes offline, then the warning can't be
-	 *   triggered, and we depend on blk-mq timeout handler to
-	 *   handle dispatched requests to this hctx
-	 */
-	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
-	    cpu_online(hctx->next_cpu)) {
-		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
-			raw_smp_processor_id(),
-			cpumask_empty(hctx->cpumask) ? "inactive": "active");
-		dump_stack();
-	}
-
 	/*
 	 * We can't run the queue inline with ints disabled. Ensure that
 	 * we catch bad users of this early.
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * gendisk handling
+ *
+ * Portions Copyright (C) 2020 Christoph Hellwig
  */
 
 #include <linux/module.h>
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 1991-1998  Linus Torvalds
  * Re-organised Feb 1998 Russell King
+ * Copyright (C) 2020 Christoph Hellwig
  */
 #include <linux/fs.h>
 #include <linux/slab.h>
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -318,7 +318,8 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
 	blk_queue_logical_block_size(nbd->disk->queue, blksize);
 	blk_queue_physical_block_size(nbd->disk->queue, blksize);
 
-	set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
+	if (max_part)
+		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
 	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
 		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
 	return 0;
@@ -1476,9 +1477,11 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
 		refcount_set(&nbd->config_refs, 1);
 		refcount_inc(&nbd->refs);
 		mutex_unlock(&nbd->config_lock);
-		set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+		if (max_part)
+			set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
 	} else if (nbd_disconnected(nbd->config)) {
-		set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+		if (max_part)
+			set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
 	}
 out:
 	mutex_unlock(&nbd_index_mutex);
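Note: both hunks apply the same rule — only request a partition rescan when the module actually supports partitions (max_part > 0). For reference, a sketch of how the flag is consumed on the next open of the device, approximating the fs/block_dev.c logic of this era (names and exact call site are assumptions):

	static void example_open_side(struct block_device *bdev)
	{
		struct gendisk *disk = bdev->bd_disk;

		/* the scan is deferred: the flag is acted on at open time */
		if (test_bit(GD_NEED_PART_SCAN, &disk->state))
			bdev_disk_changed(bdev, false);	/* rereads the partition table */
	}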
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -432,7 +432,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
 	 * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
 	 * of sysfs link already was removed already.
 	 */
-	if (strlen(dev->blk_symlink_name) && try_module_get(THIS_MODULE)) {
+	if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
 		sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
 		kfree(dev->blk_symlink_name);
 		module_put(THIS_MODULE);
@@ -521,7 +521,8 @@ static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
 	return 0;
 
 out_err:
-	dev->blk_symlink_name[0] = '\0';
+	kfree(dev->blk_symlink_name);
+	dev->blk_symlink_name = NULL ;
 	return ret;
 }
 
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -88,6 +88,8 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
 	dev->discard_alignment	    = le32_to_cpu(rsp->discard_alignment);
 	dev->secure_discard	    = le16_to_cpu(rsp->secure_discard);
 	dev->rotational		    = rsp->rotational;
+	dev->wc			    = !!(rsp->cache_policy & RNBD_WRITEBACK);
+	dev->fua		    = !!(rsp->cache_policy & RNBD_FUA);
 
 	dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
 	dev->max_segments = BMAX_SEGMENTS;
@@ -347,19 +349,26 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
 	struct rnbd_iu *iu;
 	struct rtrs_permit *permit;
 
+	iu = kzalloc(sizeof(*iu), GFP_KERNEL);
+	if (!iu) {
+		return NULL;
+	}
+
 	permit = rnbd_get_permit(sess, con_type,
 				 wait ? RTRS_PERMIT_WAIT :
 				 RTRS_PERMIT_NOWAIT);
-	if (unlikely(!permit))
+	if (unlikely(!permit)) {
+		kfree(iu);
 		return NULL;
-	iu = rtrs_permit_to_pdu(permit);
+	}
+
 	iu->permit = permit;
 	/*
 	 * 1st reference is dropped after finishing sending a "user" message,
 	 * 2nd reference is dropped after confirmation with the response is
 	 * returned.
 	 * 1st and 2nd can happen in any order, so the rnbd_iu should be
-	 * released (rtrs_permit returned to ibbtrs) only leased after both
+	 * released (rtrs_permit returned to rtrs) only after both
 	 * are finished.
 	 */
 	atomic_set(&iu->refcount, 2);
@@ -371,8 +380,10 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
 
 static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
 {
-	if (atomic_dec_and_test(&iu->refcount))
+	if (atomic_dec_and_test(&iu->refcount)) {
 		rnbd_put_permit(sess, iu->permit);
+		kfree(iu);
+	}
 }
 
 static void rnbd_softirq_done_fn(struct request *rq)
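Note: with the permit's pdu gone, the iu becomes a plain kzalloc/kfree pair whose lifetime follows the two references described in the comment above. A sketch of the resulting caller pattern for the "user message" paths below (simplified; the real senders' error handling is omitted, and the helper name is illustrative):

	static int send_usr_msg_example(struct rnbd_clt_session *sess,
					enum rtrs_clt_con_type con_type)
	{
		struct rnbd_iu *iu;

		iu = rnbd_get_iu(sess, con_type, RTRS_PERMIT_WAIT);
		if (!iu)
			return -ENOMEM;
		/* refcount == 2: one ref for the send path, one for the response */

		/* ... send the message; the confirmation callback finishes with
		 * rnbd_put_iu(sess, iu), dropping the response reference ... */

		rnbd_put_iu(sess, iu);	/* drop the sender's reference */
		return 0;
	}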
@@ -382,6 +393,7 @@ static void rnbd_softirq_done_fn(struct request *rq)
 	struct rnbd_iu *iu;
 
 	iu = blk_mq_rq_to_pdu(rq);
+	sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
 	rnbd_put_permit(sess, iu->permit);
 	blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
 }
@@ -475,7 +487,7 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
 	iu->buf = NULL;
 	iu->dev = dev;
 
-	sg_mark_end(&iu->sglist[0]);
+	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
 
 	msg.hdr.type	= cpu_to_le16(RNBD_MSG_CLOSE);
 	msg.device_id	= cpu_to_le32(device_id);
@@ -490,6 +502,7 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
 		err = errno;
 	}
 
+	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
@@ -562,7 +575,8 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
 	iu->buf = rsp;
 	iu->dev = dev;
 
-	sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
+	sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
 	msg.hdr.type	= cpu_to_le16(RNBD_MSG_OPEN);
 	msg.access_mode	= dev->access_mode;
@@ -570,7 +584,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
 
 	WARN_ON(!rnbd_clt_get_dev(dev));
 	err = send_usr_msg(sess->rtrs, READ, iu,
-			   &vec, sizeof(*rsp), iu->sglist, 1,
+			   &vec, sizeof(*rsp), iu->sgt.sgl, 1,
 			   msg_open_conf, &errno, wait);
 	if (err) {
 		rnbd_clt_put_dev(dev);
@@ -580,6 +594,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
 		err = errno;
 	}
 
+	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
@@ -608,7 +623,8 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 	iu->buf = rsp;
 	iu->sess = sess;
 
-	sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
+	sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
 	msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
 	msg.ver      = RNBD_PROTO_VER_MAJOR;
@@ -624,7 +640,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 		goto put_iu;
 	}
 	err = send_usr_msg(sess->rtrs, READ, iu,
-			   &vec, sizeof(*rsp), iu->sglist, 1,
+			   &vec, sizeof(*rsp), iu->sgt.sgl, 1,
 			   msg_sess_info_conf, &errno, wait);
 	if (err) {
 		rnbd_clt_put_sess(sess);
@@ -634,7 +650,7 @@ put_iu:
 	} else {
 		err = errno;
 	}
-
+	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
@@ -803,7 +819,7 @@ static struct rnbd_clt_session *alloc_sess(const char *sessname)
 	rnbd_init_cpu_qlists(sess->cpu_queues);
 
 	/*
-	 * That is simple percpu variable which stores cpu indeces, which are
+	 * That is simple percpu variable which stores cpu indices, which are
 	 * incremented on each access. We need that for the sake of fairness
 	 * to wake up queues in a round-robin manner.
 	 */
@@ -1014,11 +1030,10 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
 	 * See queue limits.
 	 */
 	if (req_op(rq) != REQ_OP_DISCARD)
-		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);
+		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
 
 	if (sg_cnt == 0)
-		/* Do not forget to mark the end */
-		sg_mark_end(&iu->sglist[0]);
+		sg_mark_end(&iu->sgt.sgl[0]);
 
 	msg.hdr.type	= cpu_to_le16(RNBD_MSG_IO);
 	msg.device_id	= cpu_to_le32(dev->device_id);
@@ -1027,13 +1042,13 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
 		.iov_base = &msg,
 		.iov_len  = sizeof(msg)
 	};
-	size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
+	size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt);
 	req_ops = (struct rtrs_clt_req_ops) {
 		.priv = iu,
 		.conf_fn = msg_io_conf,
 	};
 	err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
-			       &vec, 1, size, iu->sglist, sg_cnt);
+			       &vec, 1, size, iu->sgt.sgl, sg_cnt);
 	if (unlikely(err)) {
 		rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
 				err);
@@ -1120,6 +1135,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
 	struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
 	int err;
+	blk_status_t ret = BLK_STS_IOERR;
 
 	if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
 		return BLK_STS_IOERR;
@@ -1131,32 +1147,35 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return BLK_STS_RESOURCE;
 	}
 
+	iu->sgt.sgl = iu->first_sgl;
+	err = sg_alloc_table_chained(&iu->sgt,
+				     /* Even-if the request has no segment,
+				      * sglist must have one entry at least */
+				     blk_rq_nr_phys_segments(rq) ? : 1,
+				     iu->sgt.sgl,
+				     RNBD_INLINE_SG_CNT);
+	if (err) {
+		rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err);
+		rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
+		rnbd_put_permit(dev->sess, iu->permit);
+		return BLK_STS_RESOURCE;
+	}
+
 	blk_mq_start_request(rq);
 	err = rnbd_client_xfer_request(dev, rq, iu);
 	if (likely(err == 0))
 		return BLK_STS_OK;
 	if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
 		rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
-		rnbd_put_permit(dev->sess, iu->permit);
-		return BLK_STS_RESOURCE;
+		ret = BLK_STS_RESOURCE;
 	}
 
+	sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
 	rnbd_put_permit(dev->sess, iu->permit);
-	return BLK_STS_IOERR;
-}
-
-static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
-			     unsigned int hctx_idx, unsigned int numa_node)
-{
-	struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
-
-	sg_init_table(iu->sglist, BMAX_SEGMENTS);
-	return 0;
+	return ret;
 }
 
 static struct blk_mq_ops rnbd_mq_ops = {
 	.queue_rq	= rnbd_queue_rq,
-	.init_request	= rnbd_init_request,
 	.complete	= rnbd_softirq_done_fn,
 };
 
@@ -1170,7 +1189,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
 	tag_set->numa_node		= NUMA_NO_NODE;
 	tag_set->flags		= BLK_MQ_F_SHOULD_MERGE |
 				  BLK_MQ_F_TAG_QUEUE_SHARED;
-	tag_set->cmd_size		= sizeof(struct rnbd_iu);
+	tag_set->cmd_size	= sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
 	tag_set->nr_hw_queues	= num_online_cpus();
 
 	return blk_mq_alloc_tag_set(tag_set);
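Note: blk-mq allocates cmd_size bytes of driver data directly behind every struct request, so growing cmd_size by RNBD_RDMA_SGL_SIZE is what makes the first_sgl[] flexible array (declared in rnbd-clt.h below) actually usable. Conceptually, per request:

	[ struct request | struct rnbd_iu | RNBD_INLINE_SG_CNT scatterlists ]
	                  ^ blk_mq_rq_to_pdu(rq)       ^ iu->first_sgl

A minimal sketch of the mapping side, using only names from the hunks above (the wrapper function itself is illustrative): the table starts on the inline chunk and sg_alloc_table_chained() chains extra scatterlist pages only when a request has more than RNBD_INLINE_SG_CNT physical segments.

	static int example_map_rq(struct request *rq)
	{
		struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);

		/* point the table at the inline chunk riding in the pdu */
		iu->sgt.sgl = iu->first_sgl;

		/* even a zero-segment request needs one entry */
		return sg_alloc_table_chained(&iu->sgt,
					      blk_rq_nr_phys_segments(rq) ?: 1,
					      iu->sgt.sgl, RNBD_INLINE_SG_CNT);
	}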
@@ -1208,7 +1227,7 @@ find_and_get_or_create_sess(const char *sessname,
 	 */
 	sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
 				   paths, path_cnt, port_nr,
-				   sizeof(struct rnbd_iu),
+				   0, /* Do not use pdu of rtrs */
 				   RECONNECT_DELAY, BMAX_SEGMENTS,
 				   BLK_MAX_SEGMENT_SIZE,
 				   MAX_RECONNECTS);
@@ -1305,7 +1324,7 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
 	blk_queue_max_segments(dev->queue, dev->max_segments);
 	blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
 	blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
-	blk_queue_write_cache(dev->queue, true, true);
+	blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
 	dev->queue->queuedata = dev;
 }
 
@@ -1388,12 +1407,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
 		goto out_queues;
 	}
 
-	dev->pathname = kzalloc(strlen(pathname) + 1, GFP_KERNEL);
+	dev->pathname = kstrdup(pathname, GFP_KERNEL);
 	if (!dev->pathname) {
 		ret = -ENOMEM;
 		goto out_queues;
 	}
-	strlcpy(dev->pathname, pathname, strlen(pathname) + 1);
 
 	dev->clt_device_id	= ret;
 	dev->sess		= sess;
@@ -1529,13 +1547,13 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
 	}
 
 	rnbd_clt_info(dev,
-		      "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
+		      "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
 		      dev->gd->disk_name, dev->nsectors,
 		      dev->logical_block_size, dev->physical_block_size,
 		      dev->max_write_same_sectors, dev->max_discard_sectors,
 		      dev->discard_granularity, dev->discard_alignment,
 		      dev->secure_discard, dev->max_segments,
-		      dev->max_hw_sectors, dev->rotational);
+		      dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
 
 	mutex_unlock(&dev->lock);
 
@@ -1667,7 +1685,7 @@ static void rnbd_destroy_sessions(void)
 	/*
 	 * Here at this point there is no any concurrent access to sessions
 	 * list and devices list:
-	 *   1. New session or device can'be be created - session sysfs files
+	 *   1. New session or device can't be created - session sysfs files
 	 *      are removed.
 	 *   2. Device or session can't be removed - module reference is taken
 	 *      into account in unmap device sysfs callback.
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -44,6 +44,13 @@ struct rnbd_iu_comp {
 	int errno;
 };
 
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define RNBD_INLINE_SG_CNT 0
+#else
+#define RNBD_INLINE_SG_CNT 2
+#endif
+#define RNBD_RDMA_SGL_SIZE (sizeof(struct scatterlist) * RNBD_INLINE_SG_CNT)
+
 struct rnbd_iu {
 	union {
 		struct request *rq; /* for block io */
@@ -56,11 +63,12 @@ struct rnbd_iu {
 		/* use to send msg associated with a sess */
 		struct rnbd_clt_session *sess;
 	};
-	struct scatterlist	sglist[BMAX_SEGMENTS];
+	struct sg_table		sgt;
 	struct work_struct	work;
 	int			errno;
 	struct rnbd_iu_comp	comp;
 	atomic_t		refcount;
+	struct scatterlist	first_sgl[]; /* must be the last one */
 };
 
 struct rnbd_cpu_qlist {
@@ -112,6 +120,8 @@ struct rnbd_clt_dev {
 	enum rnbd_access_mode	access_mode;
 	bool			read_only;
 	bool			rotational;
+	bool			wc;
+	bool			fua;
 	u32			max_hw_sectors;
 	u32			max_write_same_sectors;
 	u32			max_discard_sectors;
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -108,6 +108,11 @@ struct rnbd_msg_close {
 	__le32		device_id;
 };
 
+enum rnbd_cache_policy {
+	RNBD_FUA = 1 << 0,
+	RNBD_WRITEBACK = 1 << 1,
+};
+
 /**
  * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
  * @hdr:		message header
@@ -124,6 +129,7 @@ struct rnbd_msg_close {
 * @max_segments:	max segments hardware support in one transfer
 * @secure_discard:	supports secure discard
 * @rotation:		is a rotational disc?
+ * @cache_policy:	support write-back caching or FUA?
 */
struct rnbd_msg_open_rsp {
	struct rnbd_msg_hdr	hdr;
@@ -139,7 +145,8 @@ struct rnbd_msg_open_rsp {
 	__le16		max_segments;
 	__le16		secure_discard;
 	u8		rotational;
-	u8		reserved[11];
+	u8		cache_policy;
+	u8		reserved[10];
 };
 
 /**
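Note: carving cache_policy out of the reserved area keeps struct rnbd_msg_open_rsp the same size, with every other field at its old offset, so an older peer simply sees the new byte as reserved. A build-time check along these lines would pin that down (illustrative only; not part of the patch):

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* the new field must sit exactly where reserved[0] used to be */
	static_assert(offsetof(struct rnbd_msg_open_rsp, cache_policy) + 1 ==
		      offsetof(struct rnbd_msg_open_rsp, reserved),
		      "cache_policy must immediately precede the reserved tail");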
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -338,9 +338,10 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
 
 void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
 {
+	mutex_lock(&sess_dev->sess->lock);
 	rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+	mutex_unlock(&sess_dev->sess->lock);
 	sess_dev->keep_id = true;
-
 }
 
 static int process_msg_close(struct rtrs_srv *rtrs,
@@ -549,6 +550,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 					struct rnbd_srv_sess_dev *sess_dev)
 {
 	struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+	struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
 
 	rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
 	rsp->device_id =
@@ -573,8 +575,12 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 		cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
 	rsp->secure_discard =
 		cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
-	rsp->rotational =
-		!blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev));
+	rsp->rotational = !blk_queue_nonrot(q);
+	rsp->cache_policy = 0;
+	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+		rsp->cache_policy |= RNBD_WRITEBACK;
+	if (blk_queue_fua(q))
+		rsp->cache_policy |= RNBD_FUA;
 }
 
 static struct rnbd_srv_sess_dev *
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -157,12 +157,6 @@ void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
 }
 EXPORT_SYMBOL(rtrs_clt_put_permit);
 
-void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
-{
-	return permit + 1;
-}
-EXPORT_SYMBOL(rtrs_permit_to_pdu);
-
 /**
  * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
  * @sess: client session pointer
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -63,13 +63,6 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
 
 void rtrs_clt_close(struct rtrs_clt *sess);
 
-/**
- * rtrs_permit_to_pdu() - converts rtrs_permit to opaque pdu pointer
- * @permit: RTRS permit pointer, it associates the memory allocation for future
- *	    RDMA operation.
- */
-void *rtrs_permit_to_pdu(struct rtrs_permit *permit);
-
 enum {
 	RTRS_PERMIT_NOWAIT = 0,
 	RTRS_PERMIT_WAIT   = 1,
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2535,8 +2535,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 		else
 			err = "device busy";
 		mutex_unlock(&bch_register_lock);
-		if (!IS_ERR(bdev))
-			bdput(bdev);
 		if (attr == &ksysfs_register_quiet)
 			goto done;
 	}
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -404,7 +404,7 @@ STORE(__cached_dev)
 		if (!env)
 			return -ENOMEM;
 		add_uevent_var(env, "DRIVER=bcache");
-		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
+		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
 		add_uevent_var(env, "CACHED_LABEL=%s", buf);
 		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
 				   KOBJ_CHANGE,
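Note: the stray comma had no functional impact — the comma operator evaluates both add_uevent_var() calls in order and discards the first result — so this is purely a readability fix. A tiny standalone illustration of why it still "worked":

	#include <stdio.h>

	int main(void)
	{
		/* both puts() run; x takes the value of the last operand, 42 */
		int x = (puts("first"), puts("second"), 42);

		printf("x = %d\n", x);
		return 0;
	}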
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -256,7 +256,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
 		return;
 	device->discipline->get_uid(device, &uid);
 	spin_lock_irqsave(&lcu->lock, flags);
-	list_del_init(&device->alias_list);
 	/* make sure that the workers don't use this device */
 	if (device == lcu->suc_data.device) {
 		spin_unlock_irqrestore(&lcu->lock, flags);
@@ -283,6 +282,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
 
 	spin_lock_irqsave(&aliastree.lock, flags);
 	spin_lock(&lcu->lock);
+	list_del_init(&device->alias_list);
 	if (list_empty(&lcu->grouplist) &&
 	    list_empty(&lcu->active_devices) &&
 	    list_empty(&lcu->inactive_devices)) {
@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device,
 	spin_unlock_irqrestore(&lcu->lock, flags);
 
 	rc = dasd_sleep_on(cqr);
-	if (rc && !suborder_not_supported(cqr)) {
+	if (!rc)
+		goto out;
+
+	if (suborder_not_supported(cqr)) {
+		/* suborder not supported or device unusable for IO */
+		rc = -EOPNOTSUPP;
+	} else {
+		/* IO failed but should be retried */
 		spin_lock_irqsave(&lcu->lock, flags);
 		lcu->flags |= NEED_UAC_UPDATE;
 		spin_unlock_irqrestore(&lcu->lock, flags);
 	}
+out:
 	dasd_sfree_request(cqr, cqr->memdev);
 	return rc;
 }
@@ -503,6 +511,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
 		return rc;
 
 	spin_lock_irqsave(&lcu->lock, flags);
+	/*
+	 * there is another update needed skip the remaining handling
+	 * the data might already be outdated
+	 * but especially do not add the device to an LCU with pending
+	 * update
+	 */
+	if (lcu->flags & NEED_UAC_UPDATE)
+		goto out;
 	lcu->pav = NO_PAV;
 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
 		switch (lcu->uac->unit[i].ua_type) {
@@ -521,6 +537,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
 				 alias_list) {
 		_add_device_to_lcu(lcu, device, refdev);
 	}
+out:
 	spin_unlock_irqrestore(&lcu->lock, flags);
 	return 0;
 }
@@ -625,6 +642,7 @@ int dasd_alias_add_device(struct dasd_device *device)
 	}
 	if (lcu->flags & UPDATE_PENDING) {
 		list_move(&device->alias_list, &lcu->active_devices);
+		private->pavgroup = NULL;
 		_schedule_lcu_update(lcu, device);
 	}
 	spin_unlock_irqrestore(&lcu->lock, flags);
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1,9 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- *  linux/fs/block_dev.c
- *
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
+ *  Copyright (C) 2016 - 2020 Christoph Hellwig
  */
 
 #include <linux/init.h>
--- a/include/trace/events/iocost.h
+++ b/include/trace/events/iocost.h
@@ -11,7 +11,7 @@ struct ioc_gq;
 
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(iocost_iocg_activate,
+DECLARE_EVENT_CLASS(iocost_iocg_state,
 
 	TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
 		 u64 last_period, u64 cur_period, u64 vtime),
@@ -59,6 +59,20 @@ TRACE_EVENT(iocost_iocg_activate,
 	)
 );
 
+DEFINE_EVENT(iocost_iocg_state, iocost_iocg_activate,
+	TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
+		 u64 last_period, u64 cur_period, u64 vtime),
+
+	TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
+);
+
+DEFINE_EVENT(iocost_iocg_state, iocost_iocg_idle,
+	TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
+		 u64 last_period, u64 cur_period, u64 vtime),
+
+	TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
+);
+
 DECLARE_EVENT_CLASS(iocg_inuse_update,
 
 	TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
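Note: turning TRACE_EVENT(iocost_iocg_activate, ...) into DECLARE_EVENT_CLASS(iocost_iocg_state, ...) lets the activate and idle events share one TP_STRUCT__entry/TP_fast_assign/TP_printk definition; each DEFINE_EVENT only restates the prototype. (TRACE_EVENT itself is defined as a class plus one event, so the old code was equivalent to a single-event class.) Any further event with the same shape would be equally cheap, e.g. this hypothetical one (illustrative only, not in the kernel):

	DEFINE_EVENT(iocost_iocg_state, iocost_iocg_example,
		TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
			 u64 last_period, u64 cur_period, u64 vtime),

		TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
	);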