Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-24 12:44:11 +08:00)
for-5.12/drivers-2021-02-17

-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmAtnZ8QHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpoOhD/9TZJN6mgvlmO2zuNlZwko0jD+HUYNRHdfa
UiZhKs55ShlT/Wd8MMLcmMU2/+iztq4c/ZLK9eS7NHgKTu3GbgsICEZK+XLSTVJh
gCwwEnY2dnBAIwBWxLeDG04DQvcwnOhVN1OSmhFbXG3dpElXSyfEjx00niGtl0tE
5YtvmStpqkC0tHlxq8CMyyfaL1ODGmBK2uhDeQCO12SXIgKondJUaI3/H1l1dC5t
+yg4PsSLqezo0oWmqdTEE7lcEJs4GK1ZOhIBLtWe6tl/zaVD6DuzJL83pChm8vF+
qV4LCpJL0wUL7IG601AemFcUmEg34oC0FD6GYYhXxVOrlk43V6AfkycZ2rljNRop
/+Ff+CmXfWPAwSfJi/vDlveCvgyAJNMpv4/GwynUM1563v3TYy0YjT6Jlz6M3TUn
pS0MW7iUHj3t36U3JvcYnITqTPSfTMtYMOsWDx+V4+E9iGsQF1d7KZlCPMvjWAI2
c3QuWjitXd10BZ2qUvSzAg6piv1taBKJxg1PsGlu707mHZp5J6VPAkYQn4rFgjua
uCBbmRQCDF/wJ02IBmBUiMPP64UGbkhr+O3MILPqki967BdDDrLzjTs4e5zbMeu/
qB9XRr1Yd95GCyS8I42OCC906NXrJ2R2E8dtV1XoASWGusL/wFZrLd1th8Uq8ibb
Os+G4t1Qug==
=vx6U
-----END PGP SIGNATURE-----

Merge tag 'for-5.12/drivers-2021-02-17' of git://git.kernel.dk/linux-block

Pull block driver updates from Jens Axboe:

 - Remove the skd driver. It's been EOL for a long time (Damien)

 - NVMe pull requests
     - fix multipath handling of ->queue_rq errors (Chao Leng)
     - nvmet cleanups (Chaitanya Kulkarni)
     - add a quirk for buggy Amazon controller (Filippo Sironi)
     - avoid devm allocations in nvme-hwmon that don't interact well
       with fabrics (Hannes Reinecke)
     - sysfs cleanups (Jiapeng Chong)
     - fix nr_zones for multipath (Keith Busch)
     - nvme-tcp crash fix for no-data commands (Sagi Grimberg)
     - nvmet-tcp fixes (Sagi Grimberg)
     - add a missing __rcu annotation (Christoph)
     - failed reconnect fixes (Chao Leng)
     - various tracing improvements (Michal Krakowiak, Johannes
       Thumshirn)
     - switch the nvmet-fc assoc_list to use RCU protection (Leonid
       Ravich)
     - resync the status codes with the latest spec (Max Gurtovoy)
     - minor nvme-tcp improvements (Sagi Grimberg)
     - various cleanups (Rikard Falkeborn, Minwoo Im, Chaitanya
       Kulkarni, Israel Rukshin)

 - Floppy O_NDELAY fix (Denis)

 - MD pull request
     - raid5 chunk_sectors fix (Guoqing)

 - Use lore links (Kees)

 - Use DEFINE_SHOW_ATTRIBUTE for nbd (Liao)

 - loop lock scaling (Pavel)

 - mtip32xx PCI fixes (Bjorn)

 - bcache fixes (Kai, Dongdong)

 - Misc fixes (Tian, Yang, Guoqing, Joe, Andy)

* tag 'for-5.12/drivers-2021-02-17' of git://git.kernel.dk/linux-block: (64 commits)
  lightnvm: pblk: Replace guid_copy() with export_guid()/import_guid()
  lightnvm: fix unnecessary NULL check warnings
  nvme-tcp: fix crash triggered with a dataless request submission
  block: Replace lkml.org links with lore
  nbd: Convert to DEFINE_SHOW_ATTRIBUTE
  nvme: add 48-bit DMA address quirk for Amazon NVMe controllers
  nvme-hwmon: rework to avoid devm allocation
  nvmet: remove else at the end of the function
  nvmet: add nvmet_req_subsys() helper
  nvmet: use min of device_path and disk len
  nvmet: use invalid cmd opcode helper
  nvmet: use invalid cmd opcode helper
  nvmet: add helper to report invalid opcode
  nvmet: remove extra variable in id-ns handler
  nvmet: make nvmet_find_namespace() req based
  nvmet: return uniform error for invalid ns
  nvmet: set status to 0 in case for invalid nsid
  nvmet-fc: add a missing __rcu annotation to nvmet_fc_tgt_assoc.queues
  nvme-multipath: set nr_zones for zoned namespaces
  nvmet-tcp: fix potential race of tcp socket closing accept_work
  ...
Commit: 9820b4dca0
@@ -16908,12 +16908,6 @@ F:	include/linux/static_call*.h
 F:	kernel/jump_label.c
 F:	kernel/static_call.c
 
-STEC S1220 SKD DRIVER
-M:	Damien Le Moal <Damien.LeMoal@wdc.com>
-L:	linux-block@vger.kernel.org
-S:	Maintained
-F:	drivers/block/skd*[ch]
-
 STI AUDIO (ASoC) DRIVERS
 M:	Arnaud Pouliquen <arnaud.pouliquen@st.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -267,16 +267,6 @@ config BLK_DEV_NBD
 
 	  If unsure, say N.
 
-config BLK_DEV_SKD
-	tristate "STEC S1120 Block Driver"
-	depends on PCI
-	depends on 64BIT
-	help
-	Saying Y or M here will enable support for the
-	STEC, Inc. S1120 PCIe SSD.
-
-	Use device /dev/skd$N amd /dev/skd$Np$M.
-
 config BLK_DEV_SX8
 	tristate "Promise SATA SX8 support"
 	depends on PCI
@@ -22,7 +22,6 @@ obj-$(CONFIG_BLK_DEV_LOOP)	+= loop.o
 obj-$(CONFIG_XILINX_SYSACE)	+= xsysace.o
 obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
 obj-$(CONFIG_SUNVDC)		+= sunvdc.o
-obj-$(CONFIG_BLK_DEV_SKD)	+= skd.o
 
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
 obj-$(CONFIG_BLK_DEV_NBD)	+= nbd.o
@@ -43,5 +42,4 @@ obj-$(CONFIG_BLK_DEV_RNBD)	+= rnbd/
 
 obj-$(CONFIG_BLK_DEV_NULL_BLK)	+= null_blk/
 
-skd-y				:= skd_main.o
 swim_mod-y			:= swim.o swim_asm.o
@@ -1046,7 +1046,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 
 	__blk_mq_end_request(rq, err);
 
-	/* cf. http://lkml.org/lkml/2006/10/31/28 */
+	/* cf. https://lore.kernel.org/lkml/20061031071040.GS14055@kernel.dk/ */
 	if (!fastfail)
 		blk_mq_run_hw_queues(q, true);
 }
@@ -1447,7 +1447,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
 
 /* drbd_req */
 extern void do_submit(struct work_struct *ws);
-extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
+extern void __drbd_make_request(struct drbd_device *, struct bio *);
 extern blk_qc_t drbd_submit_bio(struct bio *bio);
 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@ -2275,7 +2275,6 @@ static void do_retry(struct work_struct *ws)
|
||||
list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
|
||||
struct drbd_device *device = req->device;
|
||||
struct bio *bio = req->master_bio;
|
||||
unsigned long start_jif = req->start_jif;
|
||||
bool expected;
|
||||
|
||||
expected =
|
||||
@ -2310,7 +2309,7 @@ static void do_retry(struct work_struct *ws)
|
||||
/* We are not just doing submit_bio_noacct(),
|
||||
* as we want to keep the start_time information. */
|
||||
inc_ap_bio(device);
|
||||
__drbd_make_request(device, bio, start_jif);
|
||||
__drbd_make_request(device, bio);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -111,8 +111,10 @@ static struct page *page_chain_tail(struct page *page, int *len)
|
||||
{
|
||||
struct page *tmp;
|
||||
int i = 1;
|
||||
while ((tmp = page_chain_next(page)))
|
||||
++i, page = tmp;
|
||||
while ((tmp = page_chain_next(page))) {
|
||||
++i;
|
||||
page = tmp;
|
||||
}
|
||||
if (len)
|
||||
*len = i;
|
||||
return page;
|
||||
|
@ -1191,7 +1191,7 @@ static void drbd_queue_write(struct drbd_device *device, struct drbd_request *re
|
||||
* Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
|
||||
*/
|
||||
static struct drbd_request *
|
||||
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
|
||||
drbd_request_prepare(struct drbd_device *device, struct bio *bio)
|
||||
{
|
||||
const int rw = bio_data_dir(bio);
|
||||
struct drbd_request *req;
|
||||
@ -1419,9 +1419,9 @@ out:
|
||||
complete_master_bio(device, &m);
|
||||
}
|
||||
|
||||
void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
|
||||
void __drbd_make_request(struct drbd_device *device, struct bio *bio)
|
||||
{
|
||||
struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
|
||||
struct drbd_request *req = drbd_request_prepare(device, bio);
|
||||
if (IS_ERR_OR_NULL(req))
|
||||
return;
|
||||
drbd_send_and_submit(device, req);
|
||||
@ -1599,19 +1599,16 @@ void do_submit(struct work_struct *ws)
|
||||
blk_qc_t drbd_submit_bio(struct bio *bio)
|
||||
{
|
||||
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
|
||||
unsigned long start_jif;
|
||||
|
||||
blk_queue_split(&bio);
|
||||
|
||||
start_jif = jiffies;
|
||||
|
||||
/*
|
||||
* what we "blindly" assume:
|
||||
*/
|
||||
D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
|
||||
|
||||
inc_ap_bio(device);
|
||||
__drbd_make_request(device, bio, start_jif);
|
||||
__drbd_make_request(device, bio);
|
||||
return BLK_QC_T_NONE;
|
||||
}
|
||||
|
||||
|
@ -4121,23 +4121,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
|
||||
if (fdc_state[FDC(drive)].rawcmd == 1)
|
||||
fdc_state[FDC(drive)].rawcmd = 2;
|
||||
|
||||
if (!(mode & FMODE_NDELAY)) {
|
||||
if (mode & (FMODE_READ|FMODE_WRITE)) {
|
||||
drive_state[drive].last_checked = 0;
|
||||
clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
|
||||
&drive_state[drive].flags);
|
||||
if (bdev_check_media_change(bdev))
|
||||
floppy_revalidate(bdev->bd_disk);
|
||||
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
|
||||
goto out;
|
||||
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
|
||||
goto out;
|
||||
}
|
||||
res = -EROFS;
|
||||
if ((mode & FMODE_WRITE) &&
|
||||
!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
|
||||
if (mode & (FMODE_READ|FMODE_WRITE)) {
|
||||
drive_state[drive].last_checked = 0;
|
||||
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
|
||||
if (bdev_check_media_change(bdev))
|
||||
floppy_revalidate(bdev->bd_disk);
|
||||
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
|
||||
goto out;
|
||||
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
|
||||
goto out;
|
||||
}
|
||||
|
||||
res = -EROFS;
|
||||
|
||||
if ((mode & FMODE_WRITE) &&
|
||||
!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
|
||||
goto out;
|
||||
|
||||
mutex_unlock(&open_lock);
|
||||
mutex_unlock(&floppy_mutex);
|
||||
return 0;
|
||||
|
@ -704,7 +704,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
||||
int error;
|
||||
bool partscan;
|
||||
|
||||
error = mutex_lock_killable(&loop_ctl_mutex);
|
||||
error = mutex_lock_killable(&lo->lo_mutex);
|
||||
if (error)
|
||||
return error;
|
||||
error = -ENXIO;
|
||||
@ -743,9 +743,9 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
||||
loop_update_dio(lo);
|
||||
blk_mq_unfreeze_queue(lo->lo_queue);
|
||||
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
/*
|
||||
* We must drop file reference outside of loop_ctl_mutex as dropping
|
||||
* We must drop file reference outside of lo_mutex as dropping
|
||||
* the file ref can take bd_mutex which creates circular locking
|
||||
* dependency.
|
||||
*/
|
||||
@ -755,7 +755,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
if (file)
|
||||
fput(file);
|
||||
return error;
|
||||
@ -1092,7 +1092,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
|
||||
goto out_putf;
|
||||
}
|
||||
|
||||
error = mutex_lock_killable(&loop_ctl_mutex);
|
||||
error = mutex_lock_killable(&lo->lo_mutex);
|
||||
if (error)
|
||||
goto out_bdev;
|
||||
|
||||
@ -1171,7 +1171,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
|
||||
* put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
|
||||
*/
|
||||
bdgrab(bdev);
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
if (partscan)
|
||||
loop_reread_partitions(lo, bdev);
|
||||
if (!(mode & FMODE_EXCL))
|
||||
@ -1179,7 +1179,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
|
||||
return 0;
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
out_bdev:
|
||||
if (!(mode & FMODE_EXCL))
|
||||
bd_abort_claiming(bdev, loop_configure);
|
||||
@ -1200,7 +1200,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
|
||||
bool partscan = false;
|
||||
int lo_number;
|
||||
|
||||
mutex_lock(&loop_ctl_mutex);
|
||||
mutex_lock(&lo->lo_mutex);
|
||||
if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
|
||||
err = -ENXIO;
|
||||
goto out_unlock;
|
||||
@ -1253,7 +1253,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
|
||||
lo_number = lo->lo_number;
|
||||
loop_unprepare_queue(lo);
|
||||
out_unlock:
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
if (partscan) {
|
||||
/*
|
||||
* bd_mutex has been held already in release path, so don't
|
||||
@ -1284,18 +1284,17 @@ out_unlock:
|
||||
* protects us from all the other places trying to change the 'lo'
|
||||
* device.
|
||||
*/
|
||||
mutex_lock(&loop_ctl_mutex);
|
||||
mutex_lock(&lo->lo_mutex);
|
||||
lo->lo_flags = 0;
|
||||
if (!part_shift)
|
||||
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
|
||||
lo->lo_state = Lo_unbound;
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
|
||||
/*
|
||||
* Need not hold loop_ctl_mutex to fput backing file.
|
||||
* Calling fput holding loop_ctl_mutex triggers a circular
|
||||
* lock dependency possibility warning as fput can take
|
||||
* bd_mutex which is usually taken before loop_ctl_mutex.
|
||||
* Need not hold lo_mutex to fput backing file. Calling fput holding
|
||||
* lo_mutex triggers a circular lock dependency possibility warning as
|
||||
* fput can take bd_mutex which is usually taken before lo_mutex.
|
||||
*/
|
||||
if (filp)
|
||||
fput(filp);
|
||||
@ -1306,11 +1305,11 @@ static int loop_clr_fd(struct loop_device *lo)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mutex_lock_killable(&loop_ctl_mutex);
|
||||
err = mutex_lock_killable(&lo->lo_mutex);
|
||||
if (err)
|
||||
return err;
|
||||
if (lo->lo_state != Lo_bound) {
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
return -ENXIO;
|
||||
}
|
||||
/*
|
||||
@ -1325,11 +1324,11 @@ static int loop_clr_fd(struct loop_device *lo)
|
||||
*/
|
||||
if (atomic_read(&lo->lo_refcnt) > 1) {
|
||||
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
return 0;
|
||||
}
|
||||
lo->lo_state = Lo_rundown;
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
|
||||
return __loop_clr_fd(lo, false);
|
||||
}
|
||||
@ -1344,7 +1343,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
|
||||
bool partscan = false;
|
||||
bool size_changed = false;
|
||||
|
||||
err = mutex_lock_killable(&loop_ctl_mutex);
|
||||
err = mutex_lock_killable(&lo->lo_mutex);
|
||||
if (err)
|
||||
return err;
|
||||
if (lo->lo_encrypt_key_size &&
|
||||
@ -1411,7 +1410,7 @@ out_unfreeze:
|
||||
partscan = true;
|
||||
}
|
||||
out_unlock:
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
if (partscan)
|
||||
loop_reread_partitions(lo, bdev);
|
||||
|
||||
@ -1425,11 +1424,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
|
||||
struct kstat stat;
|
||||
int ret;
|
||||
|
||||
ret = mutex_lock_killable(&loop_ctl_mutex);
|
||||
ret = mutex_lock_killable(&lo->lo_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (lo->lo_state != Lo_bound) {
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
@ -1448,10 +1447,10 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
|
||||
lo->lo_encrypt_key_size);
|
||||
}
|
||||
|
||||
/* Drop loop_ctl_mutex while we call into the filesystem. */
|
||||
/* Drop lo_mutex while we call into the filesystem. */
|
||||
path = lo->lo_backing_file->f_path;
|
||||
path_get(&path);
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
|
||||
if (!ret) {
|
||||
info->lo_device = huge_encode_dev(stat.dev);
|
||||
@ -1637,7 +1636,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mutex_lock_killable(&loop_ctl_mutex);
|
||||
err = mutex_lock_killable(&lo->lo_mutex);
|
||||
if (err)
|
||||
return err;
|
||||
switch (cmd) {
|
||||
@ -1653,7 +1652,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
|
||||
default:
|
||||
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
|
||||
}
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1879,27 +1878,33 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
|
||||
struct loop_device *lo;
|
||||
int err;
|
||||
|
||||
/*
|
||||
* take loop_ctl_mutex to protect lo pointer from race with
|
||||
* loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention
|
||||
* release it prior to updating lo->lo_refcnt.
|
||||
*/
|
||||
err = mutex_lock_killable(&loop_ctl_mutex);
|
||||
if (err)
|
||||
return err;
|
||||
lo = bdev->bd_disk->private_data;
|
||||
if (!lo) {
|
||||
err = -ENXIO;
|
||||
goto out;
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
atomic_inc(&lo->lo_refcnt);
|
||||
out:
|
||||
err = mutex_lock_killable(&lo->lo_mutex);
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
return err;
|
||||
if (err)
|
||||
return err;
|
||||
atomic_inc(&lo->lo_refcnt);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void lo_release(struct gendisk *disk, fmode_t mode)
|
||||
{
|
||||
struct loop_device *lo;
|
||||
struct loop_device *lo = disk->private_data;
|
||||
|
||||
mutex_lock(&loop_ctl_mutex);
|
||||
lo = disk->private_data;
|
||||
mutex_lock(&lo->lo_mutex);
|
||||
if (atomic_dec_return(&lo->lo_refcnt))
|
||||
goto out_unlock;
|
||||
|
||||
@ -1907,7 +1912,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
|
||||
if (lo->lo_state != Lo_bound)
|
||||
goto out_unlock;
|
||||
lo->lo_state = Lo_rundown;
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
/*
|
||||
* In autoclear mode, stop the loop thread
|
||||
* and remove configuration after last close.
|
||||
@ -1924,7 +1929,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
}
|
||||
|
||||
static const struct block_device_operations lo_fops = {
|
||||
@ -1963,10 +1968,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
|
||||
struct loop_device *lo = ptr;
|
||||
struct loop_func_table *xfer = data;
|
||||
|
||||
mutex_lock(&loop_ctl_mutex);
|
||||
mutex_lock(&lo->lo_mutex);
|
||||
if (lo->lo_encryption == xfer)
|
||||
loop_release_xfer(lo);
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2152,6 +2157,7 @@ static int loop_add(struct loop_device **l, int i)
|
||||
disk->flags |= GENHD_FL_NO_PART_SCAN;
|
||||
disk->flags |= GENHD_FL_EXT_DEVT;
|
||||
atomic_set(&lo->lo_refcnt, 0);
|
||||
mutex_init(&lo->lo_mutex);
|
||||
lo->lo_number = i;
|
||||
spin_lock_init(&lo->lo_lock);
|
||||
disk->major = LOOP_MAJOR;
|
||||
@ -2182,6 +2188,7 @@ static void loop_remove(struct loop_device *lo)
|
||||
blk_cleanup_queue(lo->lo_queue);
|
||||
blk_mq_free_tag_set(&lo->tag_set);
|
||||
put_disk(lo->lo_disk);
|
||||
mutex_destroy(&lo->lo_mutex);
|
||||
kfree(lo);
|
||||
}
|
||||
|
||||
@ -2261,15 +2268,21 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
|
||||
ret = loop_lookup(&lo, parm);
|
||||
if (ret < 0)
|
||||
break;
|
||||
ret = mutex_lock_killable(&lo->lo_mutex);
|
||||
if (ret)
|
||||
break;
|
||||
if (lo->lo_state != Lo_unbound) {
|
||||
ret = -EBUSY;
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
break;
|
||||
}
|
||||
if (atomic_read(&lo->lo_refcnt) > 0) {
|
||||
ret = -EBUSY;
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
break;
|
||||
}
|
||||
lo->lo_disk->private_data = NULL;
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
idr_remove(&loop_index_idr, lo->lo_number);
|
||||
loop_remove(lo);
|
||||
break;
|
||||
|
@ -62,6 +62,7 @@ struct loop_device {
|
||||
struct request_queue *lo_queue;
|
||||
struct blk_mq_tag_set tag_set;
|
||||
struct gendisk *lo_disk;
|
||||
struct mutex lo_mutex;
|
||||
};
|
||||
|
||||
struct loop_cmd {
|
||||
|
@ -3924,23 +3924,18 @@ static DEFINE_HANDLER(7);
|
||||
|
||||
static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
|
||||
{
|
||||
int pos;
|
||||
unsigned short pcie_dev_ctrl;
|
||||
|
||||
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
|
||||
if (pos) {
|
||||
pci_read_config_word(pdev,
|
||||
pos + PCI_EXP_DEVCTL,
|
||||
&pcie_dev_ctrl);
|
||||
if (pcie_dev_ctrl & (1 << 11) ||
|
||||
pcie_dev_ctrl & (1 << 4)) {
|
||||
if (pci_is_pcie(pdev)) {
|
||||
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_dev_ctrl);
|
||||
if (pcie_dev_ctrl & PCI_EXP_DEVCTL_NOSNOOP_EN ||
|
||||
pcie_dev_ctrl & PCI_EXP_DEVCTL_RELAX_EN) {
|
||||
dev_info(&dd->pdev->dev,
|
||||
"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
|
||||
pdev->vendor, pdev->device);
|
||||
pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
|
||||
PCI_EXP_DEVCTL_RELAX_EN);
|
||||
pci_write_config_word(pdev,
|
||||
pos + PCI_EXP_DEVCTL,
|
||||
pcie_capability_write_word(pdev, PCI_EXP_DEVCTL,
|
||||
pcie_dev_ctrl);
|
||||
}
|
||||
}
|
||||
|
@ -1529,17 +1529,7 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, nbd_dbg_tasks_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations nbd_dbg_tasks_ops = {
|
||||
.open = nbd_dbg_tasks_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
|
||||
|
||||
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
|
||||
{
|
||||
@ -1564,17 +1554,7 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, nbd_dbg_flags_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations nbd_dbg_flags_ops = {
|
||||
.open = nbd_dbg_flags_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
|
||||
|
||||
static int nbd_dev_dbg_init(struct nbd_device *nbd)
|
||||
{
|
||||
@ -1592,11 +1572,11 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
|
||||
}
|
||||
config->dbg_dir = dir;
|
||||
|
||||
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
|
||||
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
|
||||
debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
|
||||
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
|
||||
debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
|
||||
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
|
||||
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
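For context on the nbd conversion above: DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the single_open()-based open helper and file_operations that the removed nbd_dbg_tasks_open()/nbd_dbg_tasks_ops (and flags equivalents) spelled out by hand. A minimal sketch of the pattern; example_show(), example_fops and example_debugfs_init() are hypothetical names used only to keep the snippet self-contained, not part of the nbd driver:

/* Sketch of the DEFINE_SHOW_ATTRIBUTE() pattern used by the nbd change above. */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "value: %d\n", 42);	/* whatever the file reports */
	return 0;
}

/*
 * Expands to example_open() wrapping single_open(example_show) plus a
 * file_operations named example_fops with seq_read/seq_lseek/single_release,
 * i.e. exactly the boilerplate removed in the hunks above.
 */
DEFINE_SHOW_ATTRIBUTE(example);

static void example_debugfs_init(struct dentry *dir)
{
	debugfs_create_file("example", 0444, dir, NULL, &example_fops);
}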
@ -944,8 +944,7 @@ failed_dma_setup:
|
||||
ctrl->done_wq = NULL;
|
||||
}
|
||||
|
||||
if (ctrl->trackers)
|
||||
vfree(ctrl->trackers);
|
||||
vfree(ctrl->trackers);
|
||||
|
||||
if (ctrl->status.buf)
|
||||
dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
|
||||
|
File diff suppressed because it is too large

@ -1,322 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright 2012 STEC, Inc.
|
||||
* Copyright (c) 2017 Western Digital Corporation or its affiliates.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef SKD_S1120_H
|
||||
#define SKD_S1120_H
|
||||
|
||||
/*
|
||||
* Q-channel, 64-bit r/w
|
||||
*/
|
||||
#define FIT_Q_COMMAND 0x400u
|
||||
#define FIT_QCMD_QID_MASK (0x3 << 1)
|
||||
#define FIT_QCMD_QID0 (0x0 << 1)
|
||||
#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0
|
||||
#define FIT_QCMD_QID1 (0x1 << 1)
|
||||
#define FIT_QCMD_QID2 (0x2 << 1)
|
||||
#define FIT_QCMD_QID3 (0x3 << 1)
|
||||
#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */
|
||||
#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4)
|
||||
#define FIT_QCMD_MSGSIZE_64 (0x0 << 4)
|
||||
#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
|
||||
#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
|
||||
#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
|
||||
#define FIT_QCMD_ALIGN L1_CACHE_BYTES
|
||||
|
||||
/*
|
||||
* Control, 32-bit r/w
|
||||
*/
|
||||
#define FIT_CONTROL 0x500u
|
||||
#define FIT_CR_HARD_RESET (1u << 0u)
|
||||
#define FIT_CR_SOFT_RESET (1u << 1u)
|
||||
#define FIT_CR_DIS_TIMESTAMPS (1u << 6u)
|
||||
#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u)
|
||||
|
||||
/*
|
||||
* Status, 32-bit, r/o
|
||||
*/
|
||||
#define FIT_STATUS 0x510u
|
||||
#define FIT_SR_DRIVE_STATE_MASK 0x000000FFu
|
||||
#define FIT_SR_SIGNATURE (0xFF << 8)
|
||||
#define FIT_SR_PIO_DMA (1 << 16)
|
||||
#define FIT_SR_DRIVE_OFFLINE 0x00
|
||||
#define FIT_SR_DRIVE_INIT 0x01
|
||||
/* #define FIT_SR_DRIVE_READY 0x02 */
|
||||
#define FIT_SR_DRIVE_ONLINE 0x03
|
||||
#define FIT_SR_DRIVE_BUSY 0x04
|
||||
#define FIT_SR_DRIVE_FAULT 0x05
|
||||
#define FIT_SR_DRIVE_DEGRADED 0x06
|
||||
#define FIT_SR_PCIE_LINK_DOWN 0x07
|
||||
#define FIT_SR_DRIVE_SOFT_RESET 0x08
|
||||
#define FIT_SR_DRIVE_INIT_FAULT 0x09
|
||||
#define FIT_SR_DRIVE_BUSY_SANITIZE 0x0A
|
||||
#define FIT_SR_DRIVE_BUSY_ERASE 0x0B
|
||||
#define FIT_SR_DRIVE_FW_BOOTING 0x0C
|
||||
#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE
|
||||
#define FIT_SR_DEVICE_MISSING 0xFF
|
||||
#define FIT_SR__RESERVED 0xFFFFFF00u
|
||||
|
||||
/*
|
||||
* FIT_STATUS - Status register data definition
|
||||
*/
|
||||
#define FIT_SR_STATE_MASK (0xFF << 0)
|
||||
#define FIT_SR_SIGNATURE (0xFF << 8)
|
||||
#define FIT_SR_PIO_DMA (1 << 16)
|
||||
|
||||
/*
|
||||
* Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear)
|
||||
*/
|
||||
#define FIT_INT_STATUS_HOST 0x520u
|
||||
#define FIT_ISH_FW_STATE_CHANGE (1u << 0u)
|
||||
#define FIT_ISH_COMPLETION_POSTED (1u << 1u)
|
||||
#define FIT_ISH_MSG_FROM_DEV (1u << 2u)
|
||||
#define FIT_ISH_UNDEFINED_3 (1u << 3u)
|
||||
#define FIT_ISH_UNDEFINED_4 (1u << 4u)
|
||||
#define FIT_ISH_Q0_FULL (1u << 5u)
|
||||
#define FIT_ISH_Q1_FULL (1u << 6u)
|
||||
#define FIT_ISH_Q2_FULL (1u << 7u)
|
||||
#define FIT_ISH_Q3_FULL (1u << 8u)
|
||||
#define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u)
|
||||
#define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u)
|
||||
|
||||
#define FIT_INT_DEF_MASK \
|
||||
(FIT_ISH_FW_STATE_CHANGE | \
|
||||
FIT_ISH_COMPLETION_POSTED | \
|
||||
FIT_ISH_MSG_FROM_DEV | \
|
||||
FIT_ISH_Q0_FULL | \
|
||||
FIT_ISH_Q1_FULL | \
|
||||
FIT_ISH_Q2_FULL | \
|
||||
FIT_ISH_Q3_FULL | \
|
||||
FIT_ISH_QCMD_FIFO_OVERRUN | \
|
||||
FIT_ISH_BAD_EXP_ROM_READ)
|
||||
|
||||
#define FIT_INT_QUEUE_FULL \
|
||||
(FIT_ISH_Q0_FULL | \
|
||||
FIT_ISH_Q1_FULL | \
|
||||
FIT_ISH_Q2_FULL | \
|
||||
FIT_ISH_Q3_FULL)
|
||||
|
||||
#define MSI_MSG_NWL_ERROR_0 0x00000000
|
||||
#define MSI_MSG_NWL_ERROR_1 0x00000001
|
||||
#define MSI_MSG_NWL_ERROR_2 0x00000002
|
||||
#define MSI_MSG_NWL_ERROR_3 0x00000003
|
||||
#define MSI_MSG_STATE_CHANGE 0x00000004
|
||||
#define MSI_MSG_COMPLETION_POSTED 0x00000005
|
||||
#define MSI_MSG_MSG_FROM_DEV 0x00000006
|
||||
#define MSI_MSG_RESERVED_0 0x00000007
|
||||
#define MSI_MSG_RESERVED_1 0x00000008
|
||||
#define MSI_MSG_QUEUE_0_FULL 0x00000009
|
||||
#define MSI_MSG_QUEUE_1_FULL 0x0000000A
|
||||
#define MSI_MSG_QUEUE_2_FULL 0x0000000B
|
||||
#define MSI_MSG_QUEUE_3_FULL 0x0000000C
|
||||
|
||||
#define FIT_INT_RESERVED_MASK \
|
||||
(FIT_ISH_UNDEFINED_3 | \
|
||||
FIT_ISH_UNDEFINED_4)
|
||||
|
||||
/*
|
||||
* Interrupt mask, 32-bit r/w
|
||||
* Bit definitions are the same as FIT_INT_STATUS_HOST
|
||||
*/
|
||||
#define FIT_INT_MASK_HOST 0x528u
|
||||
|
||||
/*
|
||||
* Message to device, 32-bit r/w
|
||||
*/
|
||||
#define FIT_MSG_TO_DEVICE 0x540u
|
||||
|
||||
/*
|
||||
* Message from device, 32-bit, r/o
|
||||
*/
|
||||
#define FIT_MSG_FROM_DEVICE 0x548u
|
||||
|
||||
/*
|
||||
* 32-bit messages to/from device, composition/extraction macros
|
||||
*/
|
||||
#define FIT_MXD_CONS(TYPE, PARAM, DATA) \
|
||||
((((TYPE) & 0xFFu) << 24u) | \
|
||||
(((PARAM) & 0xFFu) << 16u) | \
|
||||
(((DATA) & 0xFFFFu) << 0u))
|
||||
#define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu)
|
||||
#define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu)
|
||||
#define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu)
|
||||
|
||||
/*
|
||||
* Types of messages to/from device
|
||||
*/
|
||||
#define FIT_MTD_FITFW_INIT 0x01u
|
||||
#define FIT_MTD_GET_CMDQ_DEPTH 0x02u
|
||||
#define FIT_MTD_SET_COMPQ_DEPTH 0x03u
|
||||
#define FIT_MTD_SET_COMPQ_ADDR 0x04u
|
||||
#define FIT_MTD_ARM_QUEUE 0x05u
|
||||
#define FIT_MTD_CMD_LOG_HOST_ID 0x07u
|
||||
#define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u
|
||||
#define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u
|
||||
#define FIT_MFD_SMART_EXCEEDED 0x10u
|
||||
#define FIT_MFD_POWER_DOWN 0x11u
|
||||
#define FIT_MFD_OFFLINE 0x12u
|
||||
#define FIT_MFD_ONLINE 0x13u
|
||||
#define FIT_MFD_FW_RESTARTING 0x14u
|
||||
#define FIT_MFD_PM_ACTIVE 0x15u
|
||||
#define FIT_MFD_PM_STANDBY 0x16u
|
||||
#define FIT_MFD_PM_SLEEP 0x17u
|
||||
#define FIT_MFD_CMD_PROGRESS 0x18u
|
||||
|
||||
#define FIT_MTD_DEBUG 0xFEu
|
||||
#define FIT_MFD_DEBUG 0xFFu
|
||||
|
||||
#define FIT_MFD_MASK (0xFFu)
|
||||
#define FIT_MFD_DATA_MASK (0xFFu)
|
||||
#define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK)
|
||||
#define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK)
|
||||
|
||||
/*
|
||||
* Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w
|
||||
* Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR)
|
||||
* (was Response buffer in docs)
|
||||
*/
|
||||
#define FIT_MSG_TO_DEVICE_ARG 0x580u
|
||||
|
||||
/*
|
||||
* Hardware (ASIC) version, 32-bit r/o
|
||||
*/
|
||||
#define FIT_HW_VERSION 0x588u
|
||||
|
||||
/*
|
||||
* Scatter/gather list descriptor.
|
||||
* 32-bytes and must be aligned on a 32-byte boundary.
|
||||
* All fields are in little endian order.
|
||||
*/
|
||||
struct fit_sg_descriptor {
|
||||
uint32_t control;
|
||||
uint32_t byte_count;
|
||||
uint64_t host_side_addr;
|
||||
uint64_t dev_side_addr;
|
||||
uint64_t next_desc_ptr;
|
||||
};
|
||||
|
||||
#define FIT_SGD_CONTROL_NOT_LAST 0x000u
|
||||
#define FIT_SGD_CONTROL_LAST 0x40Eu
|
||||
|
||||
/*
|
||||
* Header at the beginning of a FIT message. The header
|
||||
* is followed by SSDI requests each 64 bytes.
|
||||
* A FIT message can be up to 512 bytes long and must start
|
||||
* on a 64-byte boundary.
|
||||
*/
|
||||
struct fit_msg_hdr {
|
||||
uint8_t protocol_id;
|
||||
uint8_t num_protocol_cmds_coalesced;
|
||||
uint8_t _reserved[62];
|
||||
};
|
||||
|
||||
#define FIT_PROTOCOL_ID_FIT 1
|
||||
#define FIT_PROTOCOL_ID_SSDI 2
|
||||
#define FIT_PROTOCOL_ID_SOFIT 3
|
||||
|
||||
|
||||
#define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF)
|
||||
#define FIT_PROTOCOL_MAJOR_VER(mtd_val) ((mtd_val >> 20) & 0xF)
|
||||
|
||||
/*
|
||||
* Format of a completion entry. The completion queue is circular
|
||||
* and must have at least as many entries as the maximum number
|
||||
* of commands that may be issued to the device.
|
||||
*
|
||||
* There are no head/tail pointers. The cycle value is used to
|
||||
* infer the presence of new completion records.
|
||||
* Initially the cycle in all entries is 0, the index is 0, and
|
||||
* the cycle value to expect is 1. When completions are added
|
||||
* their cycle values are set to 1. When the index wraps the
|
||||
* cycle value to expect is incremented.
|
||||
*
|
||||
* Command_context is opaque and taken verbatim from the SSDI command.
|
||||
* All other fields are big endian.
|
||||
*/
|
||||
#define FIT_PROTOCOL_VERSION_0 0
|
||||
|
||||
/*
|
||||
* Protocol major version 1 completion entry.
|
||||
* The major protocol version is found in bits
|
||||
* 20-23 of the FIT_MTD_FITFW_INIT response.
|
||||
*/
|
||||
struct fit_completion_entry_v1 {
|
||||
__be32 num_returned_bytes;
|
||||
uint16_t tag;
|
||||
uint8_t status; /* SCSI status */
|
||||
uint8_t cycle;
|
||||
};
|
||||
#define FIT_PROTOCOL_VERSION_1 1
|
||||
#define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1
|
||||
|
||||
struct fit_comp_error_info {
|
||||
uint8_t type:7; /* 00: Bits0-6 indicates the type of sense data. */
|
||||
uint8_t valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */
|
||||
uint8_t reserved0; /* 01: Obsolete field */
|
||||
uint8_t key:4; /* 02: Bits0-3 indicate the sense key. */
|
||||
uint8_t reserved2:1; /* 02: Reserved bit. */
|
||||
uint8_t bad_length:1; /* 02: Incorrect Length Indicator */
|
||||
uint8_t end_medium:1; /* 02: End of Medium */
|
||||
uint8_t file_mark:1; /* 02: Filemark */
|
||||
uint8_t info[4]; /* 03: */
|
||||
uint8_t reserved1; /* 07: Additional Sense Length */
|
||||
uint8_t cmd_spec[4]; /* 08: Command Specific Information */
|
||||
uint8_t code; /* 0C: Additional Sense Code */
|
||||
uint8_t qual; /* 0D: Additional Sense Code Qualifier */
|
||||
uint8_t fruc; /* 0E: Field Replaceable Unit Code */
|
||||
uint8_t sks_high:7; /* 0F: Sense Key Specific (MSB) */
|
||||
uint8_t sks_valid:1; /* 0F: Sense Key Specific Valid */
|
||||
uint16_t sks_low; /* 10: Sense Key Specific (LSW) */
|
||||
uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */
|
||||
uint16_t uec; /* 14: Additional Sense Bytes */
|
||||
uint64_t per __packed; /* 16: Additional Sense Bytes */
|
||||
uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
|
||||
};
|
||||
|
||||
|
||||
/* Task management constants */
|
||||
#define SOFT_TASK_SIMPLE 0x00
|
||||
#define SOFT_TASK_HEAD_OF_QUEUE 0x01
|
||||
#define SOFT_TASK_ORDERED 0x02
|
||||
|
||||
/* Version zero has the last 32 bits reserved,
|
||||
* Version one has the last 32 bits sg_list_len_bytes;
|
||||
*/
|
||||
struct skd_command_header {
|
||||
__be64 sg_list_dma_address;
|
||||
uint16_t tag;
|
||||
uint8_t attribute;
|
||||
uint8_t add_cdb_len; /* In 32 bit words */
|
||||
__be32 sg_list_len_bytes;
|
||||
};
|
||||
|
||||
struct skd_scsi_request {
|
||||
struct skd_command_header hdr;
|
||||
unsigned char cdb[16];
|
||||
/* unsigned char _reserved[16]; */
|
||||
};
|
||||
|
||||
struct driver_inquiry_data {
|
||||
uint8_t peripheral_device_type:5;
|
||||
uint8_t qualifier:3;
|
||||
uint8_t page_code;
|
||||
__be16 page_length;
|
||||
__be16 pcie_bus_number;
|
||||
uint8_t pcie_device_number;
|
||||
uint8_t pcie_function_number;
|
||||
uint8_t pcie_link_speed;
|
||||
uint8_t pcie_link_lanes;
|
||||
__be16 pcie_vendor_id;
|
||||
__be16 pcie_device_id;
|
||||
__be16 pcie_subsystem_vendor_id;
|
||||
__be16 pcie_subsystem_device_id;
|
||||
uint8_t reserved1[2];
|
||||
uint8_t reserved2[3];
|
||||
uint8_t driver_version_length;
|
||||
uint8_t driver_version[0x14];
|
||||
};
|
||||
|
||||
#endif /* SKD_S1120_H */
|
@ -530,8 +530,7 @@ static ssize_t backing_dev_store(struct device *dev,
|
||||
|
||||
return len;
|
||||
out:
|
||||
if (bitmap)
|
||||
kvfree(bitmap);
|
||||
kvfree(bitmap);
|
||||
|
||||
if (bdev)
|
||||
blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
|
||||
|
@ -988,7 +988,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
|
||||
bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
|
||||
|
||||
smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
|
||||
guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
|
||||
export_guid(smeta_buf->header.uuid, &pblk->instance_uuid);
|
||||
smeta_buf->header.id = cpu_to_le32(line->id);
|
||||
smeta_buf->header.type = cpu_to_le16(line->type);
|
||||
smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
|
||||
@ -1803,8 +1803,7 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
|
||||
|
||||
if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
|
||||
emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
|
||||
guid_copy((guid_t *)&emeta_buf->header.uuid,
|
||||
&pblk->instance_uuid);
|
||||
export_guid(emeta_buf->header.uuid, &pblk->instance_uuid);
|
||||
emeta_buf->header.id = cpu_to_le32(line->id);
|
||||
emeta_buf->header.type = cpu_to_le16(line->type);
|
||||
emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
|
||||
|
@ -23,8 +23,7 @@
|
||||
|
||||
static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
|
||||
{
|
||||
if (gc_rq->data)
|
||||
vfree(gc_rq->data);
|
||||
vfree(gc_rq->data);
|
||||
kfree(gc_rq);
|
||||
}
|
||||
|
||||
|
@ -706,8 +706,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
|
||||
|
||||
/* The first valid instance uuid is used for initialization */
|
||||
if (!valid_uuid) {
|
||||
guid_copy(&pblk->instance_uuid,
|
||||
(guid_t *)&smeta_buf->header.uuid);
|
||||
import_guid(&pblk->instance_uuid, smeta_buf->header.uuid);
|
||||
valid_uuid = 1;
|
||||
}
|
||||
|
||||
|
@ -373,6 +373,7 @@ struct cached_dev {
|
||||
unsigned int partial_stripes_expensive:1;
|
||||
unsigned int writeback_metadata:1;
|
||||
unsigned int writeback_running:1;
|
||||
unsigned int writeback_consider_fragment:1;
|
||||
unsigned char writeback_percent;
|
||||
unsigned int writeback_delay;
|
||||
|
||||
@ -385,6 +386,9 @@ struct cached_dev {
|
||||
unsigned int writeback_rate_update_seconds;
|
||||
unsigned int writeback_rate_i_term_inverse;
|
||||
unsigned int writeback_rate_p_term_inverse;
|
||||
unsigned int writeback_rate_fp_term_low;
|
||||
unsigned int writeback_rate_fp_term_mid;
|
||||
unsigned int writeback_rate_fp_term_high;
|
||||
unsigned int writeback_rate_minimum;
|
||||
|
||||
enum stop_on_failure stop_when_cache_set_failed;
|
||||
@ -1001,6 +1005,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
|
||||
|
||||
extern struct workqueue_struct *bcache_wq;
|
||||
extern struct workqueue_struct *bch_journal_wq;
|
||||
extern struct workqueue_struct *bch_flush_wq;
|
||||
extern struct mutex bch_register_lock;
|
||||
extern struct list_head bch_cache_sets;
|
||||
|
||||
@ -1042,5 +1047,7 @@ void bch_debug_exit(void);
|
||||
void bch_debug_init(void);
|
||||
void bch_request_exit(void);
|
||||
int bch_request_init(void);
|
||||
void bch_btree_exit(void);
|
||||
int bch_btree_init(void);
|
||||
|
||||
#endif /* _BCACHE_H */
|
||||
|
@ -712,8 +712,10 @@ void bch_bset_build_written_tree(struct btree_keys *b)
|
||||
for (j = inorder_next(0, t->size);
|
||||
j;
|
||||
j = inorder_next(j, t->size)) {
|
||||
while (bkey_to_cacheline(t, k) < cacheline)
|
||||
prev = k, k = bkey_next(k);
|
||||
while (bkey_to_cacheline(t, k) < cacheline) {
|
||||
prev = k;
|
||||
k = bkey_next(k);
|
||||
}
|
||||
|
||||
t->prev[j] = bkey_u64s(prev);
|
||||
t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
|
||||
@ -901,8 +903,10 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
|
||||
status = BTREE_INSERT_STATUS_INSERT;
|
||||
|
||||
while (m != bset_bkey_last(i) &&
|
||||
bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
|
||||
prev = m, m = bkey_next(m);
|
||||
bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) {
|
||||
prev = m;
|
||||
m = bkey_next(m);
|
||||
}
|
||||
|
||||
/* prev is in the tree, if we merge we're done */
|
||||
status = BTREE_INSERT_STATUS_BACK_MERGE;
|
||||
|
@ -99,6 +99,8 @@
|
||||
#define PTR_HASH(c, k) \
|
||||
(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
|
||||
|
||||
static struct workqueue_struct *btree_io_wq;
|
||||
|
||||
#define insert_lock(s, b) ((b)->level <= (s)->lock)
|
||||
|
||||
|
||||
@ -308,7 +310,7 @@ static void __btree_node_write_done(struct closure *cl)
|
||||
btree_complete_write(b, w);
|
||||
|
||||
if (btree_node_dirty(b))
|
||||
schedule_delayed_work(&b->work, 30 * HZ);
|
||||
queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
|
||||
|
||||
closure_return_with_destructor(cl, btree_node_write_unlock);
|
||||
}
|
||||
@ -481,7 +483,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
|
||||
BUG_ON(!i->keys);
|
||||
|
||||
if (!btree_node_dirty(b))
|
||||
schedule_delayed_work(&b->work, 30 * HZ);
|
||||
queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
|
||||
|
||||
set_btree_node_dirty(b);
|
||||
|
||||
@ -2764,3 +2766,18 @@ void bch_keybuf_init(struct keybuf *buf)
|
||||
spin_lock_init(&buf->lock);
|
||||
array_allocator_init(&buf->freelist);
|
||||
}
|
||||
|
||||
void bch_btree_exit(void)
|
||||
{
|
||||
if (btree_io_wq)
|
||||
destroy_workqueue(btree_io_wq);
|
||||
}
|
||||
|
||||
int __init bch_btree_init(void)
|
||||
{
|
||||
btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
|
||||
if (!btree_io_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -932,8 +932,8 @@ atomic_t *bch_journal(struct cache_set *c,
|
||||
journal_try_write(c);
|
||||
} else if (!w->dirty) {
|
||||
w->dirty = true;
|
||||
schedule_delayed_work(&c->journal.work,
|
||||
msecs_to_jiffies(c->journal_delay_ms));
|
||||
queue_delayed_work(bch_flush_wq, &c->journal.work,
|
||||
msecs_to_jiffies(c->journal_delay_ms));
|
||||
spin_unlock(&c->journal.lock);
|
||||
} else {
|
||||
spin_unlock(&c->journal.lock);
|
||||
|
@ -49,6 +49,7 @@ static int bcache_major;
|
||||
static DEFINE_IDA(bcache_device_idx);
|
||||
static wait_queue_head_t unregister_wait;
|
||||
struct workqueue_struct *bcache_wq;
|
||||
struct workqueue_struct *bch_flush_wq;
|
||||
struct workqueue_struct *bch_journal_wq;
|
||||
|
||||
|
||||
@ -2517,7 +2518,7 @@ out:
|
||||
module_put(THIS_MODULE);
|
||||
}
|
||||
|
||||
static void register_device_aync(struct async_reg_args *args)
|
||||
static void register_device_async(struct async_reg_args *args)
|
||||
{
|
||||
if (SB_IS_BDEV(args->sb))
|
||||
INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
|
||||
@ -2611,7 +2612,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
|
||||
args->sb = sb;
|
||||
args->sb_disk = sb_disk;
|
||||
args->bdev = bdev;
|
||||
register_device_aync(args);
|
||||
register_device_async(args);
|
||||
/* No wait and returns to user space */
|
||||
goto async_done;
|
||||
}
|
||||
@ -2821,6 +2822,9 @@ static void bcache_exit(void)
|
||||
destroy_workqueue(bcache_wq);
|
||||
if (bch_journal_wq)
|
||||
destroy_workqueue(bch_journal_wq);
|
||||
if (bch_flush_wq)
|
||||
destroy_workqueue(bch_flush_wq);
|
||||
bch_btree_exit();
|
||||
|
||||
if (bcache_major)
|
||||
unregister_blkdev(bcache_major, "bcache");
|
||||
@ -2876,10 +2880,26 @@ static int __init bcache_init(void)
|
||||
return bcache_major;
|
||||
}
|
||||
|
||||
if (bch_btree_init())
|
||||
goto err;
|
||||
|
||||
bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
|
||||
if (!bcache_wq)
|
||||
goto err;
|
||||
|
||||
/*
|
||||
* Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
|
||||
*
|
||||
* 1. It used `system_wq` before which also does no memory reclaim.
|
||||
* 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and
|
||||
* reduced throughput can be observed.
|
||||
*
|
||||
* We still want to user our own queue to not congest the `system_wq`.
|
||||
*/
|
||||
bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
|
||||
if (!bch_flush_wq)
|
||||
goto err;
|
||||
|
||||
bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
|
||||
if (!bch_journal_wq)
|
||||
goto err;
|
||||
|
@ -117,10 +117,14 @@ rw_attribute(writeback_running);
|
||||
rw_attribute(writeback_percent);
|
||||
rw_attribute(writeback_delay);
|
||||
rw_attribute(writeback_rate);
|
||||
rw_attribute(writeback_consider_fragment);
|
||||
|
||||
rw_attribute(writeback_rate_update_seconds);
|
||||
rw_attribute(writeback_rate_i_term_inverse);
|
||||
rw_attribute(writeback_rate_p_term_inverse);
|
||||
rw_attribute(writeback_rate_fp_term_low);
|
||||
rw_attribute(writeback_rate_fp_term_mid);
|
||||
rw_attribute(writeback_rate_fp_term_high);
|
||||
rw_attribute(writeback_rate_minimum);
|
||||
read_attribute(writeback_rate_debug);
|
||||
|
||||
@ -195,6 +199,7 @@ SHOW(__bch_cached_dev)
|
||||
var_printf(bypass_torture_test, "%i");
|
||||
var_printf(writeback_metadata, "%i");
|
||||
var_printf(writeback_running, "%i");
|
||||
var_printf(writeback_consider_fragment, "%i");
|
||||
var_print(writeback_delay);
|
||||
var_print(writeback_percent);
|
||||
sysfs_hprint(writeback_rate,
|
||||
@ -205,6 +210,9 @@ SHOW(__bch_cached_dev)
|
||||
var_print(writeback_rate_update_seconds);
|
||||
var_print(writeback_rate_i_term_inverse);
|
||||
var_print(writeback_rate_p_term_inverse);
|
||||
var_print(writeback_rate_fp_term_low);
|
||||
var_print(writeback_rate_fp_term_mid);
|
||||
var_print(writeback_rate_fp_term_high);
|
||||
var_print(writeback_rate_minimum);
|
||||
|
||||
if (attr == &sysfs_writeback_rate_debug) {
|
||||
@ -303,6 +311,7 @@ STORE(__cached_dev)
|
||||
sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
|
||||
sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
|
||||
sysfs_strtoul_bool(writeback_running, dc->writeback_running);
|
||||
sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment);
|
||||
sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);
|
||||
|
||||
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
|
||||
@ -331,6 +340,16 @@ STORE(__cached_dev)
|
||||
sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
|
||||
dc->writeback_rate_p_term_inverse,
|
||||
1, UINT_MAX);
|
||||
sysfs_strtoul_clamp(writeback_rate_fp_term_low,
|
||||
dc->writeback_rate_fp_term_low,
|
||||
1, dc->writeback_rate_fp_term_mid - 1);
|
||||
sysfs_strtoul_clamp(writeback_rate_fp_term_mid,
|
||||
dc->writeback_rate_fp_term_mid,
|
||||
dc->writeback_rate_fp_term_low + 1,
|
||||
dc->writeback_rate_fp_term_high - 1);
|
||||
sysfs_strtoul_clamp(writeback_rate_fp_term_high,
|
||||
dc->writeback_rate_fp_term_high,
|
||||
dc->writeback_rate_fp_term_mid + 1, UINT_MAX);
|
||||
sysfs_strtoul_clamp(writeback_rate_minimum,
|
||||
dc->writeback_rate_minimum,
|
||||
1, UINT_MAX);
|
||||
@ -499,9 +518,13 @@ static struct attribute *bch_cached_dev_files[] = {
|
||||
&sysfs_writeback_delay,
|
||||
&sysfs_writeback_percent,
|
||||
&sysfs_writeback_rate,
|
||||
&sysfs_writeback_consider_fragment,
|
||||
&sysfs_writeback_rate_update_seconds,
|
||||
&sysfs_writeback_rate_i_term_inverse,
|
||||
&sysfs_writeback_rate_p_term_inverse,
|
||||
&sysfs_writeback_rate_fp_term_low,
|
||||
&sysfs_writeback_rate_fp_term_mid,
|
||||
&sysfs_writeback_rate_fp_term_high,
|
||||
&sysfs_writeback_rate_minimum,
|
||||
&sysfs_writeback_rate_debug,
|
||||
&sysfs_io_errors,
|
||||
@ -1071,8 +1094,10 @@ SHOW(__bch_cache)
|
||||
--n;
|
||||
|
||||
while (cached < p + n &&
|
||||
*cached == BTREE_PRIO)
|
||||
cached++, n--;
|
||||
*cached == BTREE_PRIO) {
|
||||
cached++;
|
||||
n--;
|
||||
}
|
||||
|
||||
for (i = 0; i < n; i++)
|
||||
sum += INITIAL_PRIO - cached[i];
|
||||
|
@ -88,6 +88,44 @@ static void __update_writeback_rate(struct cached_dev *dc)
|
||||
int64_t integral_scaled;
|
||||
uint32_t new_rate;
|
||||
|
||||
/*
|
||||
* We need to consider the number of dirty buckets as well
|
||||
* when calculating the proportional_scaled, Otherwise we might
|
||||
* have an unreasonable small writeback rate at a highly fragmented situation
|
||||
* when very few dirty sectors consumed a lot dirty buckets, the
|
||||
* worst case is when dirty buckets reached cutoff_writeback_sync and
|
||||
* dirty data is still not even reached to writeback percent, so the rate
|
||||
* still will be at the minimum value, which will cause the write
|
||||
* stuck at a non-writeback mode.
|
||||
*/
|
||||
struct cache_set *c = dc->disk.c;
|
||||
|
||||
int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets;
|
||||
|
||||
if (dc->writeback_consider_fragment &&
|
||||
c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) {
|
||||
int64_t fragment =
|
||||
div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty);
|
||||
int64_t fp_term;
|
||||
int64_t fps;
|
||||
|
||||
if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
|
||||
fp_term = dc->writeback_rate_fp_term_low *
|
||||
(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
|
||||
} else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
|
||||
fp_term = dc->writeback_rate_fp_term_mid *
|
||||
(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
|
||||
} else {
|
||||
fp_term = dc->writeback_rate_fp_term_high *
|
||||
(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
|
||||
}
|
||||
fps = div_s64(dirty, dirty_buckets) * fp_term;
|
||||
if (fragment > 3 && fps > proportional_scaled) {
|
||||
/* Only overrite the p when fragment > 3 */
|
||||
proportional_scaled = fps;
|
||||
}
|
||||
}
|
||||
|
||||
if ((error < 0 && dc->writeback_rate_integral > 0) ||
|
||||
(error > 0 && time_before64(local_clock(),
|
||||
dc->writeback_rate.next + NSEC_PER_MSEC))) {
|
||||
@ -977,6 +1015,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
|
||||
|
||||
dc->writeback_metadata = true;
|
||||
dc->writeback_running = false;
|
||||
dc->writeback_consider_fragment = true;
|
||||
dc->writeback_percent = 10;
|
||||
dc->writeback_delay = 30;
|
||||
atomic_long_set(&dc->writeback_rate.rate, 1024);
|
||||
@ -984,6 +1023,9 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
|
||||
|
||||
dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
|
||||
dc->writeback_rate_p_term_inverse = 40;
|
||||
dc->writeback_rate_fp_term_low = 1;
|
||||
dc->writeback_rate_fp_term_mid = 10;
|
||||
dc->writeback_rate_fp_term_high = 1000;
|
||||
dc->writeback_rate_i_term_inverse = 10000;
|
||||
|
||||
WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
|
||||
|
@ -16,6 +16,10 @@
|
||||
|
||||
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50
|
||||
|
||||
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50
|
||||
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
|
||||
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
|
||||
|
||||
#define BCH_DIRTY_INIT_THRD_MAX 64
|
||||
/*
|
||||
* 14 (16384ths) is chosen here as something that each backing device
|
||||
|
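As a back-of-the-envelope walk-through of the fragmentation boost described in the comment above: the only real constants below are the BCH_WRITEBACK_FRAGMENT_THRESHOLD_* values and the writeback_rate_fp_term_* defaults added in these hunks; every other input is made up for illustration.

/* Userspace sketch mirroring the in_use/fp_term/fps arithmetic added to
 * __update_writeback_rate(); inputs are illustrative, not from a real cache. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int64_t LOW = 50, MID = 57, HIGH = 64;	/* BCH_WRITEBACK_FRAGMENT_THRESHOLD_* */
	const int64_t fp_term_low = 1, fp_term_mid = 10, fp_term_high = 1000; /* defaults set above */

	int64_t in_use = 60;			/* % of buckets in use (illustrative) */
	int64_t dirty = 4096;			/* dirty sectors (illustrative) */
	int64_t dirty_buckets = 2048;		/* nbuckets - avail_nbuckets (illustrative) */
	int64_t bucket_size = 1024;		/* sectors per bucket (illustrative) */
	int64_t proportional_scaled = 16;	/* whatever the PI controller produced */

	if (in_use > LOW && dirty > 0) {
		int64_t fragment = dirty_buckets * bucket_size / dirty;	/* 512 */
		int64_t fp_term, fps;

		if (in_use <= MID)
			fp_term = fp_term_low * (in_use - LOW);
		else if (in_use <= HIGH)
			fp_term = fp_term_mid * (in_use - MID);	/* 10 * 3 = 30 */
		else
			fp_term = fp_term_high * (in_use - HIGH);

		fps = dirty / dirty_buckets * fp_term;		/* 2 * 30 = 60 */
		if (fragment > 3 && fps > proportional_scaled)
			proportional_scaled = fps;		/* boosted from 16 to 60 */
	}
	printf("proportional_scaled = %lld\n", (long long)proportional_scaled);
	return 0;
}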
@ -7643,7 +7643,7 @@ static int raid5_run(struct mddev *mddev)
|
||||
}
|
||||
|
||||
/* device size must be a multiple of chunk size */
|
||||
mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
|
||||
mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1);
|
||||
mddev->resync_max_sectors = mddev->dev_sectors;
|
||||
|
||||
if (mddev->degraded > dirty_parity_disks &&
|
||||
|
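The raid5 change above widens the alignment mask: chunk_sectors is a 32-bit quantity, so ~(chunk_sectors - 1) is computed in 32 bits and then zero-extended, which clears the upper half of the 64-bit dev_sectors on devices larger than 2^32 sectors. A small userspace sketch of the promotion issue; the values are made up, only the arithmetic mirrors the hunk:

/* Illustration of why the (sector_t) cast matters in the raid5 hunk above. */
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
	unsigned int chunk_sectors = 1024;		/* 512 KiB chunks */
	sector_t dev_sectors = 0x100000000ULL + 4096;	/* > 2^32 sectors */

	sector_t truncated = dev_sectors & ~(chunk_sectors - 1);	   /* 32-bit mask */
	sector_t correct   = dev_sectors & ~((sector_t)chunk_sectors - 1); /* 64-bit mask */

	printf("truncated = 0x%" PRIx64 "\n", truncated);	/* 0x1000: upper bits lost */
	printf("correct   = 0x%" PRIx64 "\n", correct);		/* 0x100001000 */
	return 0;
}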
@ -279,14 +279,13 @@ static blk_status_t nvme_error_status(u16 status)
|
||||
|
||||
static void nvme_retry_req(struct request *req)
|
||||
{
|
||||
struct nvme_ns *ns = req->q->queuedata;
|
||||
unsigned long delay = 0;
|
||||
u16 crd;
|
||||
|
||||
/* The mask and shift result must be <= 3 */
|
||||
crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
|
||||
if (ns && crd)
|
||||
delay = ns->ctrl->crdt[crd - 1] * 100;
|
||||
if (crd)
|
||||
delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
|
||||
|
||||
nvme_req(req)->retries++;
|
||||
blk_mq_requeue_request(req, false);
|
||||
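The nvme_retry_req() hunk above derives the requeue delay from the Command Retry Delay (CRD) field of the completion status, using the controller's CRDT table (units of 100 ms) instead of reaching through the namespace. A small sketch of that arithmetic; the status word and crdt[] contents are made up, and NVME_SC_CRD covers status bits 12:11 as in include/linux/nvme.h:

/* Userspace sketch of the CRD -> delay computation in nvme_retry_req(). */
#include <stdio.h>

#define NVME_SC_CRD 0x1800	/* Command Retry Delay field, bits 12:11 of the status */

int main(void)
{
	unsigned short status = 0x1002;		/* example completion status with CRD = 2 */
	unsigned char crdt[3] = { 1, 5, 20 };	/* controller CRDT1..3, units of 100 ms */

	unsigned short crd = (status & NVME_SC_CRD) >> 11;		/* 2 */
	unsigned long delay_ms = crd ? crdt[crd - 1] * 100UL : 0;	/* 5 * 100 = 500 ms */

	printf("requeue delay: %lu ms\n", delay_ms);
	return 0;
}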
@ -356,6 +355,21 @@ void nvme_complete_rq(struct request *req)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_complete_rq);
|
||||
|
||||
/*
|
||||
* Called to unwind from ->queue_rq on a failed command submission so that the
|
||||
* multipathing code gets called to potentially failover to another path.
|
||||
* The caller needs to unwind all transport specific resource allocations and
|
||||
* must return propagate the return value.
|
||||
*/
|
||||
blk_status_t nvme_host_path_error(struct request *req)
|
||||
{
|
||||
nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
|
||||
blk_mq_set_request_complete(req);
|
||||
nvme_complete_rq(req);
|
||||
return BLK_STS_OK;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_host_path_error);
|
||||
|
||||
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
|
||||
{
|
||||
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
|
||||
@ -371,6 +385,26 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_cancel_request);
|
||||
|
||||
void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
if (ctrl->tagset) {
|
||||
blk_mq_tagset_busy_iter(ctrl->tagset,
|
||||
nvme_cancel_request, ctrl);
|
||||
blk_mq_tagset_wait_completed_request(ctrl->tagset);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
|
||||
|
||||
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
if (ctrl->admin_tagset) {
|
||||
blk_mq_tagset_busy_iter(ctrl->admin_tagset,
|
||||
nvme_cancel_request, ctrl);
|
||||
blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
|
||||
|
||||
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
|
||||
enum nvme_ctrl_state new_state)
|
||||
{
|
||||
@ -842,11 +876,11 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
|
||||
void nvme_cleanup_cmd(struct request *req)
|
||||
{
|
||||
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
|
||||
struct nvme_ns *ns = req->rq_disk->private_data;
|
||||
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
|
||||
struct page *page = req->special_vec.bv_page;
|
||||
|
||||
if (page == ns->ctrl->discard_page)
|
||||
clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
|
||||
if (page == ctrl->discard_page)
|
||||
clear_bit_unlock(0, &ctrl->discard_page_busy);
|
||||
else
|
||||
kfree(page_address(page) + req->special_vec.bv_offset);
|
||||
}
|
||||
@ -2831,7 +2865,7 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
|
||||
struct nvme_subsystem *subsys =
|
||||
container_of(dev, struct nvme_subsystem, dev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
|
||||
return sysfs_emit(buf, "%s\n", subsys->subnqn);
|
||||
}
|
||||
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
|
||||
|
||||
@ -2861,7 +2895,7 @@ static struct attribute *nvme_subsys_attrs[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group nvme_subsys_attrs_group = {
|
||||
static const struct attribute_group nvme_subsys_attrs_group = {
|
||||
.attrs = nvme_subsys_attrs,
|
||||
};
|
||||
|
||||
@ -3524,7 +3558,7 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev,
|
||||
{
|
||||
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
|
||||
return sysfs_emit(buf, "%s\n", ctrl->ops->name);
|
||||
}
|
||||
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
|
||||
|
||||
@ -3558,7 +3592,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
|
||||
{
|
||||
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
|
||||
return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
|
||||
}
|
||||
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
|
||||
|
||||
@ -3568,7 +3602,7 @@ static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
|
||||
{
|
||||
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
|
||||
return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
|
||||
}
|
||||
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
|
||||
|
||||
@ -3578,7 +3612,7 @@ static ssize_t nvme_sysfs_show_hostid(struct device *dev,
|
||||
{
|
||||
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
|
||||
return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
|
||||
}
|
||||
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
|
||||
|
||||
@ -3696,7 +3730,7 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
|
||||
return a->mode;
|
||||
}
|
||||
|
||||
static struct attribute_group nvme_dev_attrs_group = {
|
||||
static const struct attribute_group nvme_dev_attrs_group = {
|
||||
.attrs = nvme_dev_attrs,
|
||||
.is_visible = nvme_dev_attrs_are_visible,
|
||||
};
|
||||
@ -4439,6 +4473,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
|
||||
|
||||
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
nvme_hwmon_exit(ctrl);
|
||||
nvme_fault_inject_fini(&ctrl->fault_inject);
|
||||
dev_pm_qos_hide_latency_tolerance(ctrl->device);
|
||||
cdev_device_del(&ctrl->cdev, ctrl->device);
|
||||
@ -4451,7 +4486,7 @@ static void nvme_free_cels(struct nvme_ctrl *ctrl)
|
||||
struct nvme_effects_log *cel;
|
||||
unsigned long i;
|
||||
|
||||
xa_for_each (&ctrl->cels, i, cel) {
|
||||
xa_for_each(&ctrl->cels, i, cel) {
|
||||
xa_erase(&ctrl->cels, i);
|
||||
kfree(cel);
|
||||
}
|
||||
|
@@ -552,11 +552,7 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
 	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;

-	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
-	blk_mq_start_request(rq);
-	nvme_complete_rq(rq);
-	return BLK_STS_OK;
+	return nvme_host_path_error(rq);
 }
 EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
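The four removed lines are what the new nvme_host_path_error() helper centralizes, so every transport fails a command over a dead host path the same way. Its body is not part of this hunk; a plausible sketch, assuming it marks the request complete via the blk_mq_set_request_complete() helper added near the end of this diff rather than starting it:

blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	/* mark the request complete so nvme_complete_rq() can run its
	 * failover handling without the request ever being started */
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}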
@@ -3789,7 +3789,7 @@ static struct attribute *nvme_fc_attrs[] = {
 	NULL
 };

-static struct attribute_group nvme_fc_attr_group = {
+static const struct attribute_group nvme_fc_attr_group = {
 	.attrs = nvme_fc_attrs,
 };
@@ -223,12 +223,12 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {

 int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
-	struct device *dev = ctrl->dev;
+	struct device *dev = ctrl->device;
 	struct nvme_hwmon_data *data;
 	struct device *hwmon;
 	int err;

-	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return 0;

@@ -237,19 +237,30 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)

 	err = nvme_hwmon_get_smart_log(data);
 	if (err) {
-		dev_warn(ctrl->device,
-			 "Failed to read smart log (error %d)\n", err);
-		devm_kfree(dev, data);
+		dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+		kfree(data);
 		return err;
 	}

-	hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
-						     &nvme_hwmon_chip_info,
-						     NULL);
+	hwmon = hwmon_device_register_with_info(dev, "nvme",
+						data, &nvme_hwmon_chip_info,
+						NULL);
 	if (IS_ERR(hwmon)) {
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
-		devm_kfree(dev, data);
+		kfree(data);
 	}

+	ctrl->hwmon_device = hwmon;
 	return 0;
 }

+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->hwmon_device) {
+		struct nvme_hwmon_data *data =
+			dev_get_drvdata(ctrl->hwmon_device);
+
+		hwmon_device_unregister(ctrl->hwmon_device);
+		ctrl->hwmon_device = NULL;
+		kfree(data);
+	}
+}
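The rework above trades devm-managed allocation for a plain kzalloc()/kfree() pair plus an explicit nvme_hwmon_exit(), so the hwmon state's lifetime follows the controller rather than the underlying device, which matters for fabrics controllers that can be torn down and recreated. A sketch of the resulting pairing; the example_* wrappers are illustrative only:

static int example_ctrl_setup(struct nvme_ctrl *ctrl)
{
	/* allocates nvme_hwmon_data with kzalloc() and registers the hwmon
	 * device; a failed hwmon registration is only warned about */
	return nvme_hwmon_init(ctrl);
}

static void example_ctrl_teardown(struct nvme_ctrl *ctrl)
{
	/* must run before the ctrl device disappears, as the new call at
	 * the top of nvme_uninit_ctrl() now does */
	nvme_hwmon_exit(ctrl);
}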
@@ -677,6 +677,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
 				   ns->head->disk->queue);
+#ifdef CONFIG_BLK_DEV_ZONED
+	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
+		ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
+#endif
 }

 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -144,6 +144,12 @@ enum nvme_quirks {
 	 * NVMe 1.3 compliance.
 	 */
 	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),
+
+	/*
+	 * The controller does not properly handle DMA addresses over
+	 * 48 bits.
+	 */
+	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),
 };

 /*
@@ -246,6 +252,9 @@ struct nvme_ctrl {
 	struct rw_semaphore namespaces_rwsem;
 	struct device ctrl_device;
 	struct device *device;	/* char device */
+#ifdef CONFIG_NVME_HWMON
+	struct device *hwmon_device;
+#endif
 	struct cdev cdev;
 	struct work_struct reset_work;
 	struct work_struct delete_work;
@@ -575,7 +584,10 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 }

 void nvme_complete_rq(struct request *req);
+blk_status_t nvme_host_path_error(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state);
 bool nvme_wait_reset(struct nvme_ctrl *ctrl);
@@ -809,11 +821,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)

 #ifdef CONFIG_NVME_HWMON
 int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
 #else
 static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
 	return 0;
 }
+
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
 #endif

 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -2362,13 +2362,16 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 {
 	int result = -ENOMEM;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	int dma_address_bits = 64;

 	if (pci_enable_device_mem(pdev))
 		return result;

 	pci_set_master(pdev);

-	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
+	if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+		dma_address_bits = 48;
+	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
 		goto disable;

 	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
@@ -3263,6 +3266,18 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
+		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
+		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
+		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
+		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
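The quirk's effect on the enable path boils down to picking a narrower DMA mask for the Amazon controllers listed above. A condensed sketch of that decision; the helper name is illustrative, since in the patch the logic sits inline in nvme_pci_enable():

static int nvme_pick_dma_mask(struct nvme_dev *dev)
{
	/* controllers flagged with NVME_QUIRK_DMA_ADDRESS_BITS_48 mishandle
	 * addresses above 48 bits, so never hand them one */
	int bits = (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) ? 48 : 64;

	return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(bits));
}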
@ -919,12 +919,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
|
||||
|
||||
error = nvme_init_identify(&ctrl->ctrl);
|
||||
if (error)
|
||||
goto out_stop_queue;
|
||||
goto out_quiesce_queue;
|
||||
|
||||
return 0;
|
||||
|
||||
out_quiesce_queue:
|
||||
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
|
||||
blk_sync_queue(ctrl->ctrl.admin_q);
|
||||
out_stop_queue:
|
||||
nvme_rdma_stop_queue(&ctrl->queues[0]);
|
||||
nvme_cancel_admin_tagset(&ctrl->ctrl);
|
||||
out_cleanup_queue:
|
||||
if (new)
|
||||
blk_cleanup_queue(ctrl->ctrl.admin_q);
|
||||
@ -1001,8 +1005,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
|
||||
|
||||
out_wait_freeze_timed_out:
|
||||
nvme_stop_queues(&ctrl->ctrl);
|
||||
nvme_sync_io_queues(&ctrl->ctrl);
|
||||
nvme_rdma_stop_io_queues(ctrl);
|
||||
out_cleanup_connect_q:
|
||||
nvme_cancel_tagset(&ctrl->ctrl);
|
||||
if (new)
|
||||
blk_cleanup_queue(ctrl->ctrl.connect_q);
|
||||
out_free_tag_set:
|
||||
@ -1019,11 +1025,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
|
||||
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
|
||||
blk_sync_queue(ctrl->ctrl.admin_q);
|
||||
nvme_rdma_stop_queue(&ctrl->queues[0]);
|
||||
if (ctrl->ctrl.admin_tagset) {
|
||||
blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
|
||||
nvme_cancel_request, &ctrl->ctrl);
|
||||
blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
|
||||
}
|
||||
nvme_cancel_admin_tagset(&ctrl->ctrl);
|
||||
if (remove)
|
||||
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
|
||||
nvme_rdma_destroy_admin_queue(ctrl, remove);
|
||||
@ -1037,11 +1039,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
|
||||
nvme_stop_queues(&ctrl->ctrl);
|
||||
nvme_sync_io_queues(&ctrl->ctrl);
|
||||
nvme_rdma_stop_io_queues(ctrl);
|
||||
if (ctrl->ctrl.tagset) {
|
||||
blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
|
||||
nvme_cancel_request, &ctrl->ctrl);
|
||||
blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
|
||||
}
|
||||
nvme_cancel_tagset(&ctrl->ctrl);
|
||||
if (remove)
|
||||
nvme_start_queues(&ctrl->ctrl);
|
||||
nvme_rdma_destroy_io_queues(ctrl, remove);
|
||||
@ -1144,10 +1142,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
|
||||
return 0;
|
||||
|
||||
destroy_io:
|
||||
if (ctrl->ctrl.queue_count > 1)
|
||||
if (ctrl->ctrl.queue_count > 1) {
|
||||
nvme_stop_queues(&ctrl->ctrl);
|
||||
nvme_sync_io_queues(&ctrl->ctrl);
|
||||
nvme_rdma_stop_io_queues(ctrl);
|
||||
nvme_cancel_tagset(&ctrl->ctrl);
|
||||
nvme_rdma_destroy_io_queues(ctrl, new);
|
||||
}
|
||||
destroy_admin:
|
||||
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
|
||||
blk_sync_queue(ctrl->ctrl.admin_q);
|
||||
nvme_rdma_stop_queue(&ctrl->queues[0]);
|
||||
nvme_cancel_admin_tagset(&ctrl->ctrl);
|
||||
nvme_rdma_destroy_admin_queue(ctrl, new);
|
||||
return ret;
|
||||
}
|
||||
@ -2092,7 +2098,9 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||
err_unmap:
|
||||
nvme_rdma_unmap_data(queue, rq);
|
||||
err:
|
||||
if (err == -ENOMEM || err == -EAGAIN)
|
||||
if (err == -EIO)
|
||||
ret = nvme_host_path_error(rq);
|
||||
else if (err == -ENOMEM || err == -EAGAIN)
|
||||
ret = BLK_STS_RESOURCE;
|
||||
else
|
||||
ret = BLK_STS_IOERR;
|
||||
|
@ -206,11 +206,6 @@ static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
|
||||
req->pdu_len - req->pdu_sent);
|
||||
}
|
||||
|
||||
static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
|
||||
{
|
||||
return req->iter.iov_offset;
|
||||
}
|
||||
|
||||
static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
|
||||
{
|
||||
return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
|
||||
@ -229,24 +224,29 @@ static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
|
||||
struct request *rq = blk_mq_rq_from_pdu(req);
|
||||
struct bio_vec *vec;
|
||||
unsigned int size;
|
||||
int nsegs;
|
||||
int nr_bvec;
|
||||
size_t offset;
|
||||
|
||||
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
|
||||
vec = &rq->special_vec;
|
||||
nsegs = 1;
|
||||
nr_bvec = 1;
|
||||
size = blk_rq_payload_bytes(rq);
|
||||
offset = 0;
|
||||
} else {
|
||||
struct bio *bio = req->curr_bio;
|
||||
struct bvec_iter bi;
|
||||
struct bio_vec bv;
|
||||
|
||||
vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
|
||||
nsegs = bio_segments(bio);
|
||||
nr_bvec = 0;
|
||||
bio_for_each_bvec(bv, bio, bi) {
|
||||
nr_bvec++;
|
||||
}
|
||||
size = bio->bi_iter.bi_size;
|
||||
offset = bio->bi_iter.bi_bvec_done;
|
||||
}
|
||||
|
||||
iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
|
||||
iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
|
||||
req->iter.iov_offset = offset;
|
||||
}
|
||||
|
||||
@ -983,7 +983,6 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
|
||||
req->state = NVME_TCP_SEND_DATA;
|
||||
if (queue->data_digest)
|
||||
crypto_ahash_init(queue->snd_hash);
|
||||
nvme_tcp_init_iter(req, WRITE);
|
||||
} else {
|
||||
nvme_tcp_done_send_req(queue);
|
||||
}
|
||||
@ -1016,8 +1015,6 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
|
||||
req->state = NVME_TCP_SEND_DATA;
|
||||
if (queue->data_digest)
|
||||
crypto_ahash_init(queue->snd_hash);
|
||||
if (!req->data_sent)
|
||||
nvme_tcp_init_iter(req, WRITE);
|
||||
return 1;
|
||||
}
|
||||
req->offset += ret;
|
||||
@ -1815,8 +1812,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
|
||||
|
||||
out_wait_freeze_timed_out:
|
||||
nvme_stop_queues(ctrl);
|
||||
nvme_sync_io_queues(ctrl);
|
||||
nvme_tcp_stop_io_queues(ctrl);
|
||||
out_cleanup_connect_q:
|
||||
nvme_cancel_tagset(ctrl);
|
||||
if (new)
|
||||
blk_cleanup_queue(ctrl->connect_q);
|
||||
out_free_tag_set:
|
||||
@ -1878,12 +1877,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
|
||||
|
||||
error = nvme_init_identify(ctrl);
|
||||
if (error)
|
||||
goto out_stop_queue;
|
||||
goto out_quiesce_queue;
|
||||
|
||||
return 0;
|
||||
|
||||
out_quiesce_queue:
|
||||
blk_mq_quiesce_queue(ctrl->admin_q);
|
||||
blk_sync_queue(ctrl->admin_q);
|
||||
out_stop_queue:
|
||||
nvme_tcp_stop_queue(ctrl, 0);
|
||||
nvme_cancel_admin_tagset(ctrl);
|
||||
out_cleanup_queue:
|
||||
if (new)
|
||||
blk_cleanup_queue(ctrl->admin_q);
|
||||
@ -1904,11 +1907,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
|
||||
blk_mq_quiesce_queue(ctrl->admin_q);
|
||||
blk_sync_queue(ctrl->admin_q);
|
||||
nvme_tcp_stop_queue(ctrl, 0);
|
||||
if (ctrl->admin_tagset) {
|
||||
blk_mq_tagset_busy_iter(ctrl->admin_tagset,
|
||||
nvme_cancel_request, ctrl);
|
||||
blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
|
||||
}
|
||||
nvme_cancel_admin_tagset(ctrl);
|
||||
if (remove)
|
||||
blk_mq_unquiesce_queue(ctrl->admin_q);
|
||||
nvme_tcp_destroy_admin_queue(ctrl, remove);
|
||||
@ -1924,11 +1923,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
|
||||
nvme_stop_queues(ctrl);
|
||||
nvme_sync_io_queues(ctrl);
|
||||
nvme_tcp_stop_io_queues(ctrl);
|
||||
if (ctrl->tagset) {
|
||||
blk_mq_tagset_busy_iter(ctrl->tagset,
|
||||
nvme_cancel_request, ctrl);
|
||||
blk_mq_tagset_wait_completed_request(ctrl->tagset);
|
||||
}
|
||||
nvme_cancel_tagset(ctrl);
|
||||
if (remove)
|
||||
nvme_start_queues(ctrl);
|
||||
nvme_tcp_destroy_io_queues(ctrl, remove);
|
||||
@ -2003,10 +1998,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
|
||||
return 0;
|
||||
|
||||
destroy_io:
|
||||
if (ctrl->queue_count > 1)
|
||||
if (ctrl->queue_count > 1) {
|
||||
nvme_stop_queues(ctrl);
|
||||
nvme_sync_io_queues(ctrl);
|
||||
nvme_tcp_stop_io_queues(ctrl);
|
||||
nvme_cancel_tagset(ctrl);
|
||||
nvme_tcp_destroy_io_queues(ctrl, new);
|
||||
}
|
||||
destroy_admin:
|
||||
blk_mq_quiesce_queue(ctrl->admin_q);
|
||||
blk_sync_queue(ctrl->admin_q);
|
||||
nvme_tcp_stop_queue(ctrl, 0);
|
||||
nvme_cancel_admin_tagset(ctrl);
|
||||
nvme_tcp_destroy_admin_queue(ctrl, new);
|
||||
return ret;
|
||||
}
|
||||
@ -2268,12 +2271,12 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
|
||||
req->data_len = blk_rq_nr_phys_segments(rq) ?
|
||||
blk_rq_payload_bytes(rq) : 0;
|
||||
req->curr_bio = rq->bio;
|
||||
if (req->curr_bio && req->data_len)
|
||||
nvme_tcp_init_iter(req, rq_data_dir(rq));
|
||||
|
||||
if (rq_data_dir(rq) == WRITE &&
|
||||
req->data_len <= nvme_tcp_inline_data_size(queue))
|
||||
req->pdu_len = req->data_len;
|
||||
else if (req->curr_bio)
|
||||
nvme_tcp_init_iter(req, READ);
|
||||
|
||||
pdu->hdr.type = nvme_tcp_cmd;
|
||||
pdu->hdr.flags = 0;
|
||||
|
@ -102,6 +102,23 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
|
||||
{
|
||||
const char *ret = trace_seq_buffer_ptr(p);
|
||||
u8 lbaf = cdw10[0] & 0xF;
|
||||
u8 mset = (cdw10[0] >> 4) & 0x1;
|
||||
u8 pi = (cdw10[0] >> 5) & 0x7;
|
||||
u8 pil = cdw10[1] & 0x1;
|
||||
u8 ses = (cdw10[1] >> 1) & 0x7;
|
||||
|
||||
trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u",
|
||||
lbaf, mset, pi, pil, ses);
|
||||
|
||||
trace_seq_putc(p, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
|
||||
{
|
||||
const char *ret = trace_seq_buffer_ptr(p);
|
||||
@ -131,6 +148,35 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
|
||||
{
|
||||
const char *ret = trace_seq_buffer_ptr(p);
|
||||
u64 slba = get_unaligned_le64(cdw10);
|
||||
u8 zsa = cdw10[12];
|
||||
u8 all = cdw10[13];
|
||||
|
||||
trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
|
||||
trace_seq_putc(p, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
|
||||
{
|
||||
const char *ret = trace_seq_buffer_ptr(p);
|
||||
u64 slba = get_unaligned_le64(cdw10);
|
||||
u32 numd = get_unaligned_le32(cdw10 + 8);
|
||||
u8 zra = cdw10[12];
|
||||
u8 zrasf = cdw10[13];
|
||||
u8 pr = cdw10[14];
|
||||
|
||||
trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
|
||||
slba, numd, zra, zrasf, pr);
|
||||
trace_seq_putc(p, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
|
||||
{
|
||||
const char *ret = trace_seq_buffer_ptr(p);
|
||||
@ -159,6 +205,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
|
||||
return nvme_trace_admin_get_features(p, cdw10);
|
||||
case nvme_admin_get_lba_status:
|
||||
return nvme_trace_get_lba_status(p, cdw10);
|
||||
case nvme_admin_format_nvm:
|
||||
return nvme_trace_admin_format_nvm(p, cdw10);
|
||||
default:
|
||||
return nvme_trace_common(p, cdw10);
|
||||
}
|
||||
@ -171,9 +219,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
|
||||
case nvme_cmd_read:
|
||||
case nvme_cmd_write:
|
||||
case nvme_cmd_write_zeroes:
|
||||
case nvme_cmd_zone_append:
|
||||
return nvme_trace_read_write(p, cdw10);
|
||||
case nvme_cmd_dsm:
|
||||
return nvme_trace_dsm(p, cdw10);
|
||||
case nvme_cmd_zone_mgmt_send:
|
||||
return nvme_trace_zone_mgmt_send(p, cdw10);
|
||||
case nvme_cmd_zone_mgmt_recv:
|
||||
return nvme_trace_zone_mgmt_recv(p, cdw10);
|
||||
default:
|
||||
return nvme_trace_common(p, cdw10);
|
||||
}
|
||||
|
@ -74,34 +74,28 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
|
||||
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
|
||||
struct nvme_smart_log *slog)
|
||||
{
|
||||
struct nvmet_ns *ns;
|
||||
u64 host_reads, host_writes, data_units_read, data_units_written;
|
||||
u16 status;
|
||||
|
||||
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
|
||||
if (!ns) {
|
||||
pr_err("Could not find namespace id : %d\n",
|
||||
le32_to_cpu(req->cmd->get_log_page.nsid));
|
||||
req->error_loc = offsetof(struct nvme_rw_command, nsid);
|
||||
return NVME_SC_INVALID_NS;
|
||||
}
|
||||
status = nvmet_req_find_ns(req);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
/* we don't have the right data for file backed ns */
|
||||
if (!ns->bdev)
|
||||
goto out;
|
||||
if (!req->ns->bdev)
|
||||
return NVME_SC_SUCCESS;
|
||||
|
||||
host_reads = part_stat_read(ns->bdev, ios[READ]);
|
||||
host_reads = part_stat_read(req->ns->bdev, ios[READ]);
|
||||
data_units_read =
|
||||
DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
|
||||
host_writes = part_stat_read(ns->bdev, ios[WRITE]);
|
||||
DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
|
||||
host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
|
||||
data_units_written =
|
||||
DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
|
||||
DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
|
||||
|
||||
put_unaligned_le64(host_reads, &slog->host_reads[0]);
|
||||
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
|
||||
put_unaligned_le64(host_writes, &slog->host_writes[0]);
|
||||
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
|
||||
out:
|
||||
nvmet_put_namespace(ns);
|
||||
|
||||
return NVME_SC_SUCCESS;
|
||||
}
|
||||
@ -468,10 +462,8 @@ out:
|
||||
|
||||
static void nvmet_execute_identify_ns(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
||||
struct nvmet_ns *ns;
|
||||
struct nvme_id_ns *id;
|
||||
u16 status = 0;
|
||||
u16 status;
|
||||
|
||||
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
|
||||
req->error_loc = offsetof(struct nvme_identify, nsid);
|
||||
@ -486,20 +478,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
|
||||
}
|
||||
|
||||
/* return an all zeroed buffer if we can't find an active namespace */
|
||||
ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
|
||||
if (!ns) {
|
||||
status = NVME_SC_INVALID_NS;
|
||||
status = nvmet_req_find_ns(req);
|
||||
if (status) {
|
||||
status = 0;
|
||||
goto done;
|
||||
}
|
||||
|
||||
nvmet_ns_revalidate(ns);
|
||||
nvmet_ns_revalidate(req->ns);
|
||||
|
||||
/*
|
||||
* nuse = ncap = nsze isn't always true, but we have no way to find
|
||||
* that out from the underlying device.
|
||||
*/
|
||||
id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
|
||||
switch (req->port->ana_state[ns->anagrpid]) {
|
||||
id->ncap = id->nsze =
|
||||
cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
|
||||
switch (req->port->ana_state[req->ns->anagrpid]) {
|
||||
case NVME_ANA_INACCESSIBLE:
|
||||
case NVME_ANA_PERSISTENT_LOSS:
|
||||
break;
|
||||
@ -508,8 +501,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
|
||||
break;
|
||||
}
|
||||
|
||||
if (ns->bdev)
|
||||
nvmet_bdev_set_limits(ns->bdev, id);
|
||||
if (req->ns->bdev)
|
||||
nvmet_bdev_set_limits(req->ns->bdev, id);
|
||||
|
||||
/*
|
||||
* We just provide a single LBA format that matches what the
|
||||
@ -523,25 +516,24 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
|
||||
* controllers, but also with any other user of the block device.
|
||||
*/
|
||||
id->nmic = (1 << 0);
|
||||
id->anagrpid = cpu_to_le32(ns->anagrpid);
|
||||
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
|
||||
|
||||
memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
|
||||
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
|
||||
|
||||
id->lbaf[0].ds = ns->blksize_shift;
|
||||
id->lbaf[0].ds = req->ns->blksize_shift;
|
||||
|
||||
if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
|
||||
if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
|
||||
id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
|
||||
NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
|
||||
NVME_NS_DPC_PI_TYPE3;
|
||||
id->mc = NVME_MC_EXTENDED_LBA;
|
||||
id->dps = ns->pi_type;
|
||||
id->dps = req->ns->pi_type;
|
||||
id->flbas = NVME_NS_FLBAS_META_EXT;
|
||||
id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
|
||||
id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
|
||||
}
|
||||
|
||||
if (ns->readonly)
|
||||
if (req->ns->readonly)
|
||||
id->nsattr |= (1 << 0);
|
||||
nvmet_put_namespace(ns);
|
||||
done:
|
||||
if (!status)
|
||||
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
|
||||
@ -607,37 +599,32 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
|
||||
|
||||
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_ns *ns;
|
||||
u16 status = 0;
|
||||
off_t off = 0;
|
||||
u16 status;
|
||||
|
||||
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
|
||||
if (!ns) {
|
||||
req->error_loc = offsetof(struct nvme_identify, nsid);
|
||||
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
|
||||
status = nvmet_req_find_ns(req);
|
||||
if (status)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
|
||||
if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
|
||||
status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
|
||||
NVME_NIDT_UUID_LEN,
|
||||
&ns->uuid, &off);
|
||||
&req->ns->uuid, &off);
|
||||
if (status)
|
||||
goto out_put_ns;
|
||||
goto out;
|
||||
}
|
||||
if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
|
||||
if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
|
||||
status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
|
||||
NVME_NIDT_NGUID_LEN,
|
||||
&ns->nguid, &off);
|
||||
&req->ns->nguid, &off);
|
||||
if (status)
|
||||
goto out_put_ns;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
|
||||
off) != NVME_IDENTIFY_DATA_SIZE - off)
|
||||
status = NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
out_put_ns:
|
||||
nvmet_put_namespace(ns);
|
||||
|
||||
out:
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
@ -696,14 +683,12 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
|
||||
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
|
||||
{
|
||||
u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
|
||||
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
||||
u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
|
||||
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
|
||||
u16 status;
|
||||
|
||||
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
|
||||
if (unlikely(!req->ns)) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
||||
status = nvmet_req_find_ns(req);
|
||||
if (status)
|
||||
return status;
|
||||
}
|
||||
|
||||
mutex_lock(&subsys->lock);
|
||||
switch (write_protect) {
|
||||
@ -757,7 +742,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
|
||||
|
||||
void nvmet_execute_set_features(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
||||
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
|
||||
u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
|
||||
u16 status = 0;
|
||||
@ -801,14 +786,13 @@ void nvmet_execute_set_features(struct nvmet_req *req)
|
||||
|
||||
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
||||
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
|
||||
u32 result;
|
||||
|
||||
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
|
||||
if (!req->ns) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
||||
return NVME_SC_INVALID_NS | NVME_SC_DNR;
|
||||
}
|
||||
result = nvmet_req_find_ns(req);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
mutex_lock(&subsys->lock);
|
||||
if (req->ns->readonly == true)
|
||||
result = NVME_NS_WRITE_PROTECT;
|
||||
@ -832,7 +816,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)
|
||||
|
||||
void nvmet_execute_get_features(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
||||
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
|
||||
u16 status = 0;
|
||||
|
||||
@ -939,7 +923,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
|
||||
|
||||
if (nvme_is_fabrics(cmd))
|
||||
return nvmet_parse_fabrics_cmd(req);
|
||||
if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
|
||||
if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
|
||||
return nvmet_parse_discovery_cmd(req);
|
||||
|
||||
ret = nvmet_check_ctrl_status(req, cmd);
|
||||
|
@@ -45,7 +45,7 @@ static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
 {
 	if (p->enabled)
 		pr_err("Disable port '%u' before changing attribute in %s\n",
-			le16_to_cpu(p->disc_addr.portid), caller);
+		       le16_to_cpu(p->disc_addr.portid), caller);
 	return p->enabled;
 }

@@ -266,10 +266,8 @@ static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
 	if (strtobool(page, &val))
 		return -EINVAL;

-	if (port->enabled) {
-		pr_err("Disable port before setting pi_enable value.\n");
+	if (nvmet_is_port_enabled(port, __func__))
 		return -EACCES;
-	}

 	port->pi_enable = val;
 	return count;
@@ -82,6 +82,15 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
 	return status;
 }

+u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
+{
+	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
+		 req->sq->qid);
+
+	req->error_loc = offsetof(struct nvme_common_command, opcode);
+	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
+
 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
 		const char *subsysnqn);

@@ -417,15 +426,18 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 	cancel_delayed_work_sync(&ctrl->ka_work);
 }

-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
+u16 nvmet_req_find_ns(struct nvmet_req *req)
 {
-	struct nvmet_ns *ns;
+	u32 nsid = le32_to_cpu(req->cmd->common.nsid);

-	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
-	if (ns)
-		percpu_ref_get(&ns->ref);
+	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+	if (unlikely(!req->ns)) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
+		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+	}

-	return ns;
+	percpu_ref_get(&req->ns->ref);
+	return NVME_SC_SUCCESS;
 }

 static void nvmet_destroy_namespace(struct percpu_ref *ref)
@@ -862,11 +874,10 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 	if (nvmet_req_passthru_ctrl(req))
 		return nvmet_parse_passthru_io_cmd(req);

-	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
-	if (unlikely(!req->ns)) {
-		req->error_loc = offsetof(struct nvme_common_command, nsid);
-		return NVME_SC_INVALID_NS | NVME_SC_DNR;
-	}
+	ret = nvmet_req_find_ns(req);
+	if (unlikely(ret))
+		return ret;

 	ret = nvmet_check_ana_state(req->port, req->ns);
 	if (unlikely(ret)) {
 		req->error_loc = offsetof(struct nvme_common_command, nsid);
@@ -880,8 +891,8 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)

 	if (req->ns->file)
 		return nvmet_file_parse_io_cmd(req);
-	else
-		return nvmet_bdev_parse_io_cmd(req);
+
+	return nvmet_bdev_parse_io_cmd(req);
 }

 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
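Every converted handler in this series follows the same calling convention around the new nvmet_req_find_ns() shown above: the helper fills req->ns, sets req->error_loc and returns an NVMe status on failure. A sketch of a caller; the handler name is made up:

static u16 nvmet_example_per_ns_cmd(struct nvmet_req *req)
{
	u16 status = nvmet_req_find_ns(req);

	if (status)
		return status;	/* NVME_SC_INVALID_NS | NVME_SC_DNR, error_loc set */

	/* ... operate on req->ns, which now holds a percpu reference ... */
	return NVME_SC_SUCCESS;
}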
@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue {
|
||||
struct list_head avail_defer_list;
|
||||
struct workqueue_struct *work_q;
|
||||
struct kref ref;
|
||||
struct rcu_head rcu;
|
||||
struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
|
||||
} __aligned(sizeof(unsigned long long));
|
||||
|
||||
@ -164,9 +165,10 @@ struct nvmet_fc_tgt_assoc {
|
||||
struct nvmet_fc_hostport *hostport;
|
||||
struct nvmet_fc_ls_iod *rcv_disconn;
|
||||
struct list_head a_list;
|
||||
struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
|
||||
struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
|
||||
struct kref ref;
|
||||
struct work_struct del_work;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
|
||||
@ -790,7 +792,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
|
||||
u16 qid, u16 sqsize)
|
||||
{
|
||||
struct nvmet_fc_tgt_queue *queue;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
if (qid > NVMET_NR_QUEUES)
|
||||
@ -829,9 +830,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
|
||||
goto out_fail_iodlist;
|
||||
|
||||
WARN_ON(assoc->queues[qid]);
|
||||
spin_lock_irqsave(&assoc->tgtport->lock, flags);
|
||||
assoc->queues[qid] = queue;
|
||||
spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
|
||||
rcu_assign_pointer(assoc->queues[qid], queue);
|
||||
|
||||
return queue;
|
||||
|
||||
@ -851,11 +850,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
|
||||
{
|
||||
struct nvmet_fc_tgt_queue *queue =
|
||||
container_of(ref, struct nvmet_fc_tgt_queue, ref);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
|
||||
queue->assoc->queues[queue->qid] = NULL;
|
||||
spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
|
||||
rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
|
||||
|
||||
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
|
||||
|
||||
@ -863,7 +859,7 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
|
||||
|
||||
destroy_workqueue(queue->work_q);
|
||||
|
||||
kfree(queue);
|
||||
kfree_rcu(queue, rcu);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -965,24 +961,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
|
||||
struct nvmet_fc_tgt_queue *queue;
|
||||
u64 association_id = nvmet_fc_getassociationid(connection_id);
|
||||
u16 qid = nvmet_fc_getqueueid(connection_id);
|
||||
unsigned long flags;
|
||||
|
||||
if (qid > NVMET_NR_QUEUES)
|
||||
return NULL;
|
||||
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
|
||||
if (association_id == assoc->association_id) {
|
||||
queue = assoc->queues[qid];
|
||||
queue = rcu_dereference(assoc->queues[qid]);
|
||||
if (queue &&
|
||||
(!atomic_read(&queue->connected) ||
|
||||
!nvmet_fc_tgt_q_get(queue)))
|
||||
queue = NULL;
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
rcu_read_unlock();
|
||||
return queue;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
rcu_read_unlock();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1137,7 +1132,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
|
||||
}
|
||||
if (!needrandom) {
|
||||
assoc->association_id = ran;
|
||||
list_add_tail(&assoc->a_list, &tgtport->assoc_list);
|
||||
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
|
||||
}
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
}
|
||||
@ -1167,7 +1162,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
|
||||
|
||||
nvmet_fc_free_hostport(assoc->hostport);
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
list_del(&assoc->a_list);
|
||||
list_del_rcu(&assoc->a_list);
|
||||
oldls = assoc->rcv_disconn;
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
/* if pending Rcv Disconnect Association LS, send rsp now */
|
||||
@ -1177,7 +1172,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
|
||||
dev_info(tgtport->dev,
|
||||
"{%d:%d} Association freed\n",
|
||||
tgtport->fc_target_port.port_num, assoc->a_id);
|
||||
kfree(assoc);
|
||||
kfree_rcu(assoc, rcu);
|
||||
nvmet_fc_tgtport_put(tgtport);
|
||||
}
|
||||
|
||||
@ -1198,7 +1193,6 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
|
||||
{
|
||||
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
|
||||
struct nvmet_fc_tgt_queue *queue;
|
||||
unsigned long flags;
|
||||
int i, terminating;
|
||||
|
||||
terminating = atomic_xchg(&assoc->terminating, 1);
|
||||
@ -1207,19 +1201,23 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
|
||||
if (terminating)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
|
||||
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
|
||||
queue = assoc->queues[i];
|
||||
if (queue) {
|
||||
if (!nvmet_fc_tgt_q_get(queue))
|
||||
continue;
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
nvmet_fc_delete_target_queue(queue);
|
||||
nvmet_fc_tgt_q_put(queue);
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
rcu_read_lock();
|
||||
queue = rcu_dereference(assoc->queues[i]);
|
||||
if (!queue) {
|
||||
rcu_read_unlock();
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!nvmet_fc_tgt_q_get(queue)) {
|
||||
rcu_read_unlock();
|
||||
continue;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
nvmet_fc_delete_target_queue(queue);
|
||||
nvmet_fc_tgt_q_put(queue);
|
||||
}
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
|
||||
dev_info(tgtport->dev,
|
||||
"{%d:%d} Association deleted\n",
|
||||
@ -1234,10 +1232,9 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
|
||||
{
|
||||
struct nvmet_fc_tgt_assoc *assoc;
|
||||
struct nvmet_fc_tgt_assoc *ret = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
|
||||
if (association_id == assoc->association_id) {
|
||||
ret = assoc;
|
||||
if (!nvmet_fc_tgt_a_get(assoc))
|
||||
@ -1245,7 +1242,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1473,19 +1470,17 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
|
||||
static void
|
||||
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
|
||||
{
|
||||
struct nvmet_fc_tgt_assoc *assoc, *next;
|
||||
unsigned long flags;
|
||||
struct nvmet_fc_tgt_assoc *assoc;
|
||||
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
list_for_each_entry_safe(assoc, next,
|
||||
&tgtport->assoc_list, a_list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
|
||||
if (!nvmet_fc_tgt_a_get(assoc))
|
||||
continue;
|
||||
if (!schedule_work(&assoc->del_work))
|
||||
/* already deleting - release local reference */
|
||||
nvmet_fc_tgt_a_put(assoc);
|
||||
}
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1568,16 +1563,16 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
|
||||
continue;
|
||||
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
|
||||
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
|
||||
queue = assoc->queues[0];
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
|
||||
queue = rcu_dereference(assoc->queues[0]);
|
||||
if (queue && queue->nvme_sq.ctrl == ctrl) {
|
||||
if (nvmet_fc_tgt_a_get(assoc))
|
||||
found_ctrl = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
rcu_read_unlock();
|
||||
|
||||
nvmet_fc_tgtport_put(tgtport);
|
||||
|
||||
|
@@ -1545,7 +1545,7 @@ static struct attribute *fcloop_dev_attrs[] = {
 	NULL
 };

-static struct attribute_group fclopp_dev_attrs_group = {
+static const struct attribute_group fclopp_dev_attrs_group = {
 	.attrs = fcloop_dev_attrs,
 };
@@ -256,8 +256,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 	if (is_pci_p2pdma_page(sg_page(req->sg)))
 		op |= REQ_NOMERGE;

-	sector = le64_to_cpu(req->cmd->rw.slba);
-	sector <<= (req->ns->blksize_shift - 9);
+	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

 	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
 		bio = &req->b.inline_bio;
@@ -345,7 +344,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
 	int ret;

 	ret = __blkdev_issue_discard(ns->bdev,
-			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+			nvmet_lba_to_sect(ns, range->slba),
 			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
 			GFP_KERNEL, 0, bio);
 	if (ret && ret != -EOPNOTSUPP) {
@@ -414,8 +413,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, 0))
 		return;

-	sector = le64_to_cpu(write_zeroes->slba) <<
-		(req->ns->blksize_shift - 9);
+	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
 	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
 		(req->ns->blksize_shift - 9));

@@ -451,9 +449,6 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 		req->execute = nvmet_bdev_execute_write_zeroes;
 		return 0;
 	default:
-		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
-		       req->sq->qid);
-		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return nvmet_report_invalid_opcode(req);
 	}
 }
@@ -400,9 +400,6 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
 		req->execute = nvmet_file_execute_write_zeroes;
 		return 0;
 	default:
-		pr_err("unhandled cmd for file ns %d on qid %d\n",
-		       cmd->common.opcode, req->sq->qid);
-		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return nvmet_report_invalid_opcode(req);
 	}
 }
@@ -443,7 +443,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+u16 nvmet_req_find_ns(struct nvmet_req *req);
 void nvmet_put_namespace(struct nvmet_ns *ns);
 int nvmet_ns_enable(struct nvmet_ns *ns);
 void nvmet_ns_disable(struct nvmet_ns *ns);
@@ -551,6 +551,11 @@ static inline u32 nvmet_dsm_len(struct nvmet_req *req)
 		sizeof(struct nvme_dsm_range);
 }

+static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
+{
+	return req->sq->ctrl->subsys;
+}
+
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
 void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
 int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
@@ -585,10 +590,11 @@ static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
 static inline struct nvme_ctrl *
 nvmet_req_passthru_ctrl(struct nvmet_req *req)
 {
-	return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
+	return nvmet_passthru_ctrl(nvmet_req_subsys(req));
 }

 u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

 /* Convert a 32-bit number to a 16-bit 0's based number */
 static inline __le16 to0based(u32 a)
@@ -603,4 +609,14 @@ static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
 	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
 }

+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
 #endif /* _NVMET_H */
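The two new inline helpers at the end of the header are pure shift conversions between namespace LBAs and 512-byte sectors; they replace the open-coded "<< (ns->blksize_shift - 9)" arithmetic in the block-device I/O hunks earlier in this diff. A worked example, assuming a namespace formatted with 4K blocks (blksize_shift == 12, SECTOR_SHIFT == 9, so the shift is 3 bits each way); the wrapper function exists only for illustration:

static void nvmet_lba_math_example(struct nvmet_ns *ns)
{
	/* LBA 10 on a 4K-block namespace starts at 512-byte sector 80 */
	sector_t sect = nvmet_lba_to_sect(ns, cpu_to_le64(10));	/* 10 << 3 == 80 */
	__le64 lba = nvmet_sect_to_lba(ns, sect);		/* 80 >> 3 == 10 */

	WARN_ON(le64_to_cpu(lba) != 10);
}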
@@ -239,9 +239,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		}

 		q = ns->queue;
-		timeout = req->sq->ctrl->subsys->io_timeout;
+		timeout = nvmet_req_subsys(req)->io_timeout;
 	} else {
-		timeout = req->sq->ctrl->subsys->admin_timeout;
+		timeout = nvmet_req_subsys(req)->admin_timeout;
 	}

 	rq = nvme_alloc_request(q, req->cmd, 0);
@@ -494,7 +494,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 		return nvmet_setup_passthru_command(req);
 	default:
 		/* Reject commands not in the allowlist above */
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return nvmet_report_invalid_opcode(req);
 	}
 }
@ -379,7 +379,7 @@ err:
|
||||
return NVME_SC_INTERNAL;
|
||||
}
|
||||
|
||||
static void nvmet_tcp_ddgst(struct ahash_request *hash,
|
||||
static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
|
||||
struct nvmet_tcp_cmd *cmd)
|
||||
{
|
||||
ahash_request_set_crypt(hash, cmd->req.sg,
|
||||
@ -387,6 +387,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
|
||||
crypto_ahash_digest(hash);
|
||||
}
|
||||
|
||||
static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
|
||||
struct nvmet_tcp_cmd *cmd)
|
||||
{
|
||||
struct scatterlist sg;
|
||||
struct kvec *iov;
|
||||
int i;
|
||||
|
||||
crypto_ahash_init(hash);
|
||||
for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
|
||||
sg_init_one(&sg, iov->iov_base, iov->iov_len);
|
||||
ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
|
||||
crypto_ahash_update(hash);
|
||||
}
|
||||
ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
|
||||
crypto_ahash_final(hash);
|
||||
}
|
||||
|
||||
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
|
||||
{
|
||||
struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
|
||||
@ -411,7 +428,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
|
||||
|
||||
if (queue->data_digest) {
|
||||
pdu->hdr.flags |= NVME_TCP_F_DDGST;
|
||||
nvmet_tcp_ddgst(queue->snd_hash, cmd);
|
||||
nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
|
||||
}
|
||||
|
||||
if (cmd->queue->hdr_digest) {
|
||||
@ -1060,7 +1077,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
|
||||
{
|
||||
struct nvmet_tcp_queue *queue = cmd->queue;
|
||||
|
||||
nvmet_tcp_ddgst(queue->rcv_hash, cmd);
|
||||
nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
|
||||
queue->offset = 0;
|
||||
queue->left = NVME_TCP_DIGEST_LENGTH;
|
||||
queue->rcv_state = NVMET_TCP_RECV_DDGST;
|
||||
@ -1081,14 +1098,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
|
||||
cmd->rbytes_done += ret;
|
||||
}
|
||||
|
||||
if (queue->data_digest) {
|
||||
nvmet_tcp_prep_recv_ddgst(cmd);
|
||||
return 0;
|
||||
}
|
||||
nvmet_tcp_unmap_pdu_iovec(cmd);
|
||||
|
||||
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
|
||||
cmd->rbytes_done == cmd->req.transfer_len) {
|
||||
if (queue->data_digest) {
|
||||
nvmet_tcp_prep_recv_ddgst(cmd);
|
||||
return 0;
|
||||
}
|
||||
cmd->req.execute(&cmd->req);
|
||||
}
|
||||
|
||||
@ -1468,17 +1485,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
|
||||
if (inet->rcv_tos > 0)
|
||||
ip_sock_set_tos(sock->sk, inet->rcv_tos);
|
||||
|
||||
ret = 0;
|
||||
write_lock_bh(&sock->sk->sk_callback_lock);
|
||||
sock->sk->sk_user_data = queue;
|
||||
queue->data_ready = sock->sk->sk_data_ready;
|
||||
sock->sk->sk_data_ready = nvmet_tcp_data_ready;
|
||||
queue->state_change = sock->sk->sk_state_change;
|
||||
sock->sk->sk_state_change = nvmet_tcp_state_change;
|
||||
queue->write_space = sock->sk->sk_write_space;
|
||||
sock->sk->sk_write_space = nvmet_tcp_write_space;
|
||||
if (sock->sk->sk_state != TCP_ESTABLISHED) {
|
||||
/*
|
||||
* If the socket is already closing, don't even start
|
||||
* consuming it
|
||||
*/
|
||||
ret = -ENOTCONN;
|
||||
} else {
|
||||
sock->sk->sk_user_data = queue;
|
||||
queue->data_ready = sock->sk->sk_data_ready;
|
||||
sock->sk->sk_data_ready = nvmet_tcp_data_ready;
|
||||
queue->state_change = sock->sk->sk_state_change;
|
||||
sock->sk->sk_state_change = nvmet_tcp_state_change;
|
||||
queue->write_space = sock->sk->sk_write_space;
|
||||
sock->sk->sk_write_space = nvmet_tcp_write_space;
|
||||
queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
|
||||
}
|
||||
write_unlock_bh(&sock->sk->sk_callback_lock);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
|
||||
@ -1526,8 +1553,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
|
||||
if (ret)
|
||||
goto out_destroy_sq;
|
||||
|
||||
queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
|
||||
|
||||
return 0;
|
||||
out_destroy_sq:
|
||||
mutex_lock(&nvmet_tcp_queue_mutex);
|
||||
|
@@ -48,10 +48,13 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)

 static inline void __assign_req_name(char *name, struct nvmet_req *req)
 {
-	if (req->ns)
-		strncpy(name, req->ns->device_path, DISK_NAME_LEN);
-	else
+	if (!req->ns) {
 		memset(name, 0, DISK_NAME_LEN);
+		return;
+	}
+
+	strncpy(name, req->ns->device_path,
+		min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
 }
 #endif
@@ -490,6 +490,18 @@ static inline int blk_mq_request_completed(struct request *rq)
 	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
 }

+/*
+ *
+ * Set the state to complete when completing a request from inside ->queue_rq.
+ * This is used by drivers that want to ensure special complete actions that
+ * need access to the request are called on failure, e.g. by nvme for
+ * multipathing.
+ */
+static inline void blk_mq_set_request_complete(struct request *rq)
+{
+	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+}
+
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);
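The nvme-rdma hunk earlier in this diff shows the caller side of this helper: when queueing fails in a way that should be failed over rather than surfaced as an I/O error, the driver completes the request itself (through a helper ultimately built on blk_mq_set_request_complete()) and reports success to blk-mq. A condensed sketch of that error mapping, with an illustrative function name:

static blk_status_t example_map_queue_rq_err(struct request *rq, int err)
{
	if (err == -EIO)
		/* completes rq here so multipath can retry on another path */
		return nvme_host_path_error(rq);
	if (err == -ENOMEM || err == -EAGAIN)
		return BLK_STS_RESOURCE;
	return BLK_STS_IOERR;
}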
@ -697,7 +697,11 @@ enum nvme_opcode {
|
||||
nvme_opcode_name(nvme_cmd_resv_register), \
|
||||
nvme_opcode_name(nvme_cmd_resv_report), \
|
||||
nvme_opcode_name(nvme_cmd_resv_acquire), \
|
||||
nvme_opcode_name(nvme_cmd_resv_release))
|
||||
nvme_opcode_name(nvme_cmd_resv_release), \
|
||||
nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
|
||||
nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
|
||||
nvme_opcode_name(nvme_cmd_zone_append))
|
||||
|
||||
|
||||
|
||||
/*
|
||||
@ -1473,20 +1477,29 @@ enum {
|
||||
NVME_SC_SGL_INVALID_DATA = 0xf,
|
||||
NVME_SC_SGL_INVALID_METADATA = 0x10,
|
||||
NVME_SC_SGL_INVALID_TYPE = 0x11,
|
||||
|
||||
NVME_SC_CMB_INVALID_USE = 0x12,
|
||||
NVME_SC_PRP_INVALID_OFFSET = 0x13,
|
||||
NVME_SC_ATOMIC_WU_EXCEEDED = 0x14,
|
||||
NVME_SC_OP_DENIED = 0x15,
|
||||
NVME_SC_SGL_INVALID_OFFSET = 0x16,
|
||||
NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
|
||||
|
||||
NVME_SC_RESERVED = 0x17,
|
||||
NVME_SC_HOST_ID_INCONSIST = 0x18,
|
||||
NVME_SC_KA_TIMEOUT_EXPIRED = 0x19,
|
||||
NVME_SC_KA_TIMEOUT_INVALID = 0x1A,
|
||||
NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B,
|
||||
NVME_SC_SANITIZE_FAILED = 0x1C,
|
||||
NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
|
||||
|
||||
NVME_SC_SGL_INVALID_GRANULARITY = 0x1E,
|
||||
NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F,
|
||||
NVME_SC_NS_WRITE_PROTECTED = 0x20,
|
||||
NVME_SC_CMD_INTERRUPTED = 0x21,
|
||||
NVME_SC_TRANSIENT_TR_ERR = 0x22,
|
||||
|
||||
NVME_SC_LBA_RANGE = 0x80,
|
||||
NVME_SC_CAP_EXCEEDED = 0x81,
|
||||
NVME_SC_NS_NOT_READY = 0x82,
|
||||
NVME_SC_RESERVATION_CONFLICT = 0x83,
|
||||
NVME_SC_FORMAT_IN_PROGRESS = 0x84,
|
||||
|
||||
/*
|
||||
* Command Specific Status:
|
||||
@ -1519,8 +1532,15 @@ enum {
|
||||
NVME_SC_NS_NOT_ATTACHED = 0x11a,
|
||||
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
|
||||
NVME_SC_CTRL_LIST_INVALID = 0x11c,
|
||||
NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
|
||||
NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
|
||||
NVME_SC_CTRL_ID_INVALID = 0x11f,
|
||||
NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
|
||||
NVME_SC_CTRL_RES_NUM_INVALID = 0x121,
|
||||
NVME_SC_RES_ID_INVALID = 0x122,
|
||||
NVME_SC_PMR_SAN_PROHIBITED = 0x123,
|
||||
NVME_SC_ANA_GROUP_ID_INVALID = 0x124,
|
||||
NVME_SC_ANA_ATTACH_FAILED = 0x125,
|
||||
|
||||
/*
|
||||
* I/O Command Set Specific - NVM commands:
|
||||