SCSI fixes on 20240216

Three fixes: the two fnic ones are a revert and a refix, which is why
 the diffstat is a bit big.  The target one also extracts a function to
 add a check for configuration and so looks bigger than it is.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCZc9vhyYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishai6AQDMzhWs
 7uId14w0TdPcodG+2PpKs+VgKH6kwdl39zCQ/AEAprIATmraIoPDZlWYBY0XjVMx
 +x40yPEo856XvRQ6NmY=
 =ORQ9
 -----END PGP SIGNATURE-----

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Three fixes: the two fnic ones are a revert and a refix, which is why
  the diffstat is a bit big. The target one also extracts a function to
  add a check for configuration and so looks bigger than it is"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: fnic: Move fnic_fnic_flush_tx() to a work queue
  scsi: Revert "scsi: fcoe: Fix potential deadlock on &fip->ctlr_lock"
  scsi: target: Fix unmap setup during configuration
This commit is contained in:
Linus Torvalds 2024-02-16 14:05:02 -08:00
commit c1ca10ceff
6 changed files with 48 additions and 33 deletions

View File

@@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
 {
 	struct fcoe_fcf *sel;
 	struct fcoe_fcf *fcf;
-	unsigned long flags;
 
 	mutex_lock(&fip->ctlr_mutex);
-	spin_lock_irqsave(&fip->ctlr_lock, flags);
+	spin_lock_bh(&fip->ctlr_lock);
 
 	kfree_skb(fip->flogi_req);
 	fip->flogi_req = NULL;
 	list_for_each_entry(fcf, &fip->fcfs, list)
 		fcf->flogi_sent = 0;
 
-	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+	spin_unlock_bh(&fip->ctlr_lock);
 	sel = fip->sel_fcf;
 
 	if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 {
 	struct fc_frame *fp;
 	struct fc_frame_header *fh;
-	unsigned long flags;
 	u16 old_xid;
 	u8 op;
 	u8 mac[ETH_ALEN];
@@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 		op = FIP_DT_FLOGI;
 		if (fip->mode == FIP_MODE_VN2VN)
 			break;
-		spin_lock_irqsave(&fip->ctlr_lock, flags);
+		spin_lock_bh(&fip->ctlr_lock);
 		kfree_skb(fip->flogi_req);
 		fip->flogi_req = skb;
 		fip->flogi_req_send = 1;
-		spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+		spin_unlock_bh(&fip->ctlr_lock);
 		schedule_work(&fip->timer_work);
 		return -EINPROGRESS;
 	case ELS_FDISC:
@@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
 static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
 {
 	struct fcoe_fcf *fcf;
-	unsigned long flags;
 	int error;
 
 	mutex_lock(&fip->ctlr_mutex);
-	spin_lock_irqsave(&fip->ctlr_lock, flags);
+	spin_lock_bh(&fip->ctlr_lock);
 	LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
 	fcf = fcoe_ctlr_select(fip);
 	if (!fcf || fcf->flogi_sent) {
@@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
 		fcoe_ctlr_solicit(fip, NULL);
 		error = fcoe_ctlr_flogi_send_locked(fip);
 	}
-	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+	spin_unlock_bh(&fip->ctlr_lock);
 	mutex_unlock(&fip->ctlr_mutex);
 	return error;
 }
@@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
 static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
 {
 	struct fcoe_fcf *fcf;
-	unsigned long flags;
 
-	spin_lock_irqsave(&fip->ctlr_lock, flags);
+	spin_lock_bh(&fip->ctlr_lock);
 	fcf = fip->sel_fcf;
 	if (!fcf || !fip->flogi_req_send)
 		goto unlock;
@@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
 	} else /* XXX */
 		LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
 unlock:
-	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+	spin_unlock_bh(&fip->ctlr_lock);
 }
 
 /**
/** /**

View File

@@ -305,6 +305,7 @@ struct fnic {
 	unsigned int copy_wq_base;
 	struct work_struct link_work;
 	struct work_struct frame_work;
+	struct work_struct flush_work;
 	struct sk_buff_head frame_queue;
 	struct sk_buff_head tx_queue;
 
@@ -363,7 +364,7 @@ void fnic_handle_event(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
-void fnic_flush_tx(struct fnic *);
+void fnic_flush_tx(struct work_struct *work);
 void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
 void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
 void fnic_update_mac(struct fc_lport *, u8 *new);

View File

@@ -1182,7 +1182,7 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
 
 /**
  * fnic_flush_tx() - send queued frames.
- * @fnic: fnic device
+ * @work: pointer to work element
  *
  * Send frames that were waiting to go out in FC or Ethernet mode.
  * Whenever changing modes we purge queued frames, so these frames should
@@ -1190,8 +1190,9 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
  *
  * Called without fnic_lock held.
  */
-void fnic_flush_tx(struct fnic *fnic)
+void fnic_flush_tx(struct work_struct *work)
 {
+	struct fnic *fnic = container_of(work, struct fnic, flush_work);
 	struct sk_buff *skb;
 	struct fc_frame *fp;

View File

@@ -830,6 +830,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	spin_lock_init(&fnic->vlans_lock);
 	INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
 	INIT_WORK(&fnic->event_work, fnic_handle_event);
+	INIT_WORK(&fnic->flush_work, fnic_flush_tx);
 	skb_queue_head_init(&fnic->fip_frame_queue);
 	INIT_LIST_HEAD(&fnic->evlist);
 	INIT_LIST_HEAD(&fnic->vlans);

View File

@@ -680,7 +680,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-	fnic_flush_tx(fnic);
+	queue_work(fnic_event_queue, &fnic->flush_work);
 
 reset_cmpl_handler_end:
 	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
@@ -736,7 +736,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
 		}
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-		fnic_flush_tx(fnic);
+		queue_work(fnic_event_queue, &fnic->flush_work);
 		queue_work(fnic_event_queue, &fnic->frame_work);
 	} else {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

View File

@@ -759,6 +759,29 @@ static ssize_t emulate_tas_store(struct config_item *item,
 	return count;
 }
 
+static int target_try_configure_unmap(struct se_device *dev,
+				      const char *config_opt)
+{
+	if (!dev->transport->configure_unmap) {
+		pr_err("Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	if (!target_dev_configured(dev)) {
+		pr_err("Generic Block Discard setup for %s requires device to be configured\n",
+		       config_opt);
+		return -ENODEV;
+	}
+
+	if (!dev->transport->configure_unmap(dev)) {
+		pr_err("Generic Block Discard setup for %s failed\n",
+		       config_opt);
+		return -ENOSYS;
+	}
+
+	return 0;
+}
+
 static ssize_t emulate_tpu_store(struct config_item *item,
 		const char *page, size_t count)
 {
@@ -776,11 +799,9 @@ static ssize_t emulate_tpu_store(struct config_item *item,
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
 	if (flag && !da->max_unmap_block_desc_count) {
-		if (!dev->transport->configure_unmap ||
-		    !dev->transport->configure_unmap(dev)) {
-			pr_err("Generic Block Discard not supported\n");
-			return -ENOSYS;
-		}
+		ret = target_try_configure_unmap(dev, "emulate_tpu");
+		if (ret)
+			return ret;
 	}
 
 	da->emulate_tpu = flag;
@@ -806,11 +827,9 @@ static ssize_t emulate_tpws_store(struct config_item *item,
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
 	if (flag && !da->max_unmap_block_desc_count) {
-		if (!dev->transport->configure_unmap ||
-		    !dev->transport->configure_unmap(dev)) {
-			pr_err("Generic Block Discard not supported\n");
-			return -ENOSYS;
-		}
+		ret = target_try_configure_unmap(dev, "emulate_tpws");
+		if (ret)
+			return ret;
 	}
 
 	da->emulate_tpws = flag;
@@ -1022,12 +1041,9 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
 	 * Discard supported is detected iblock_configure_device().
 	 */
 	if (flag && !da->max_unmap_block_desc_count) {
-		if (!dev->transport->configure_unmap ||
-		    !dev->transport->configure_unmap(dev)) {
-			pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
-			       da->da_dev);
-			return -ENOSYS;
-		}
+		ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
+		if (ret)
+			return ret;
 	}
 	da->unmap_zeroes_data = flag;
 	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",