workqueue: Ordered workqueue creation cleanups

For historical reasons, unbound workqueues with a max concurrency limit of 1
 are considered ordered, even though the concurrency limit hasn't been
 system-wide for a long time. This creates ambiguity around whether ordered
 execution is actually required for correctness, which proved confusing for
 e.g. btrfs (the btrfs updates are being routed through the btrfs tree).
 
There aren't many users in the tree which use this combination, and there
 are pending improvements to unbound workqueue affinity handling which will
 make inadvertent use of an ordered workqueue a bigger loss. This pull
 request clarifies the situation for most of them by updating the ones which
 require ordered execution to use alloc_ordered_workqueue().
 
There are some conversions being routed through subsystem-specific trees,
 and likely a few stragglers remain. Once they're all converted, workqueue
 can trigger a warning on unbound + @max_active==1 usages and eventually
 drop the implicit ordered behavior.
 -----BEGIN PGP SIGNATURE-----
 
 iIQEABYIACwWIQTfIjM1kS57o3GsC/uxYfJx3gVYGQUCZJoKnA4cdGpAa2VybmVs
 Lm9yZwAKCRCxYfJx3gVYGc5SAQDOtjML7Cx9AYzbY5+nYc0wTebRRTXGeOu7A3Xy
 j50rVgEAjHgvHLIdmeYmVhCeHOSN4q7Wn5AOwaIqZalOhfLyKQk=
 =hs79
 -----END PGP SIGNATURE-----

Merge tag 'wq-for-6.5-cleanup-ordered' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull ordered workqueue creation updates from Tejun Heo:
 "For historical reasons, unbound workqueues with max concurrency limit
  of 1 are considered ordered, even though the concurrency limit hasn't
  been system-wide for a long time.

  This creates ambiguity around whether ordered execution is actually
  required for correctness, which proved confusing for e.g. btrfs (the
  btrfs updates are being routed through the btrfs tree).

  There aren't many users in the tree which use this combination, and
  there are pending improvements to unbound workqueue affinity handling
  which will make inadvertent use of an ordered workqueue a bigger loss.

  This clarifies the situation for most of them by updating the ones
  which require ordered execution to use alloc_ordered_workqueue().

  There are some conversions being routed through subsystem-specific
  trees, and likely a few stragglers remain. Once they're all converted,
  workqueue can trigger a warning on unbound + @max_active==1 usages and
  eventually drop the implicit ordered behavior"
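
For reference, a minimal sketch of the conversion pattern applied in the
commits below (the queue name and flag set are illustrative, not taken from
any one driver):

	/* Before: an unbound workqueue with @max_active == 1, which the
	 * workqueue core implicitly treats as ordered.
	 */
	wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);

	/* After: an explicitly ordered workqueue.  WQ_UNBOUND and
	 * @max_active == 1 are implied by alloc_ordered_workqueue(), so
	 * only the remaining flags are passed through.
	 */
	wq = alloc_ordered_workqueue("example_wq", WQ_MEM_RECLAIM);

alloc_ordered_workqueue() is a wrapper around alloc_workqueue() that adds
WQ_UNBOUND and the internal ordered flags and pins @max_active to 1, which
is why converted call sites drop both WQ_UNBOUND and the trailing 1. The
"Use default @max_active" commits instead keep alloc_workqueue() and pass
0 (default concurrency) where ordering was never actually needed.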

* tag 'wq-for-6.5-cleanup-ordered' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  rxrpc: Use alloc_ordered_workqueue() to create ordered workqueues
  net: qrtr: Use alloc_ordered_workqueue() to create ordered workqueues
  net: wwan: t7xx: Use alloc_ordered_workqueue() to create ordered workqueues
  dm integrity: Use alloc_ordered_workqueue() to create ordered workqueues
  media: amphion: Use alloc_ordered_workqueue() to create ordered workqueues
  scsi: NCR5380: Use default @max_active for hostdata->work_q
  media: coda: Use alloc_ordered_workqueue() to create ordered workqueues
  crypto: octeontx2: Use alloc_ordered_workqueue() to create ordered workqueues
  wifi: ath10/11/12k: Use alloc_ordered_workqueue() to create ordered workqueues
  wifi: mwifiex: Use default @max_active for workqueues
  wifi: iwlwifi: Use default @max_active for trans_pcie->rba.alloc_wq
  xen/pvcalls: Use alloc_ordered_workqueue() to create ordered workqueues
  virt: acrn: Use alloc_ordered_workqueue() to create ordered workqueues
  net: octeontx2: Use alloc_ordered_workqueue() to create ordered workqueues
  net: thunderx: Use alloc_ordered_workqueue() to create ordered workqueues
  greybus: Use alloc_ordered_workqueue() to create ordered workqueues
  powerpc, workqueue: Use alloc_ordered_workqueue() to create ordered workqueues
commit 72dc6db7e3
Author: Linus Torvalds
Date:   2023-06-27 16:46:06 -07:00

28 changed files with 57 additions and 65 deletions

@@ -200,7 +200,7 @@ static int __init TAU_init(void)
 	tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
 			 !strcmp(cur_cpu_spec->platform, "ppc750");
 
-	tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1);
+	tau_workq = alloc_ordered_workqueue("tau", 0);
 	if (!tau_workq)
 		return -ENOMEM;
 

@@ -564,8 +564,7 @@ int __init dlpar_workqueue_init(void)
 	if (pseries_hp_wq)
 		return 0;
 
-	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
-					WQ_UNBOUND, 1);
+	pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0);
 
 	return pseries_hp_wq ? 0 : -ENOMEM;
 }

@@ -357,9 +357,9 @@ static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
 	u64 vfpf_mbox_base;
 	int err, i;
 
-	cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
-					      WQ_UNBOUND | WQ_HIGHPRI |
-					      WQ_MEM_RECLAIM, 1);
+	cptpf->vfpf_mbox_wq =
+		alloc_ordered_workqueue("cpt_vfpf_mailbox",
+					WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!cptpf->vfpf_mbox_wq)
 		return -ENOMEM;
 
@@ -453,9 +453,9 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
 	resource_size_t offset;
 	int err;
 
-	cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
-					      WQ_UNBOUND | WQ_HIGHPRI |
-					      WQ_MEM_RECLAIM, 1);
+	cptpf->afpf_mbox_wq =
+		alloc_ordered_workqueue("cpt_afpf_mailbox",
+					WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!cptpf->afpf_mbox_wq)
 		return -ENOMEM;
 

@@ -75,9 +75,9 @@ static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
 	resource_size_t offset, size;
 	int ret;
 
-	cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
-					      WQ_UNBOUND | WQ_HIGHPRI |
-					      WQ_MEM_RECLAIM, 1);
+	cptvf->pfvf_mbox_wq =
+		alloc_ordered_workqueue("cpt_pfvf_mailbox",
+					WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!cptvf->pfvf_mbox_wq)
 		return -ENOMEM;
 

@@ -187,8 +187,8 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
 	spin_lock_init(&connection->lock);
 	INIT_LIST_HEAD(&connection->operations);
 
-	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
-					 dev_name(&hd->dev), hd_cport_id);
+	connection->wq = alloc_ordered_workqueue("%s:%d", 0, dev_name(&hd->dev),
+						 hd_cport_id);
 	if (!connection->wq) {
 		ret = -ENOMEM;
 		goto err_free_connection;

@@ -1318,7 +1318,7 @@ struct gb_svc *gb_svc_create(struct gb_host_device *hd)
 	if (!svc)
 		return NULL;
 
-	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
+	svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev));
 	if (!svc->wq) {
 		kfree(svc);
 		return NULL;

@@ -4268,10 +4268,10 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
 	}
 
 	/*
-	 * If this workqueue were percpu, it would cause bio reordering
+	 * If this workqueue weren't ordered, it would cause bio reordering
 	 * and reduced performance.
 	 */
-	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+	ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM);
 	if (!ic->wait_wq) {
 		ti->error = "Cannot allocate workqueue";
 		r = -ENOMEM;

@@ -207,7 +207,7 @@ static int __init local_init(void)
 	if (r)
 		return r;
 
-	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
+	deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
 	if (!deferred_remove_workqueue) {
 		r = -ENOMEM;
 		goto out_uevent_exit;

@@ -254,7 +254,7 @@ static int vpu_core_register(struct device *dev, struct vpu_core *core)
 	if (vpu_core_is_exist(vpu, core))
 		return 0;
 
-	core->workqueue = alloc_workqueue("vpu", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	core->workqueue = alloc_ordered_workqueue("vpu", WQ_MEM_RECLAIM);
 	if (!core->workqueue) {
 		dev_err(core->dev, "fail to alloc workqueue\n");
 		return -ENOMEM;

@@ -740,7 +740,7 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
 	inst->fh.ctrl_handler = &inst->ctrl_handler;
 	file->private_data = &inst->fh;
 	inst->state = VPU_CODEC_STATE_DEINIT;
-	inst->workqueue = alloc_workqueue("vpu_inst", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM);
 	if (inst->workqueue) {
 		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
 		ret = kfifo_init(&inst->msg_fifo,

@@ -3268,7 +3268,7 @@ static int coda_probe(struct platform_device *pdev)
 					       &dev->iram.blob);
 	}
 
-	dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	dev->workqueue = alloc_ordered_workqueue("coda", WQ_MEM_RECLAIM);
 	if (!dev->workqueue) {
 		dev_err(&pdev->dev, "unable to alloc workqueue\n");
 		ret = -ENOMEM;

@@ -1126,8 +1126,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 	}
 
 poll:
-	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
-					   WQ_MEM_RECLAIM, 1);
+	lmac->check_link = alloc_ordered_workqueue("check_link", WQ_MEM_RECLAIM);
 	if (!lmac->check_link)
 		return -ENOMEM;
 	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);

@@ -3044,9 +3044,8 @@ static int rvu_flr_init(struct rvu *rvu)
 				    cfg | BIT_ULL(22));
 	}
 
-	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
-				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
-				      1);
+	rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr",
+					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!rvu->flr_wq)
 		return -ENOMEM;
 

@@ -271,8 +271,7 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
 {
 	int vf;
 
-	pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
-				     WQ_UNBOUND | WQ_HIGHPRI, 1);
+	pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
 	if (!pf->flr_wq)
 		return -ENOMEM;
 
@@ -593,9 +592,8 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
 	if (!pf->mbox_pfvf)
 		return -ENOMEM;
 
-	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
-					   WQ_UNBOUND | WQ_HIGHPRI |
-					   WQ_MEM_RECLAIM, 1);
+	pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
+						   WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!pf->mbox_pfvf_wq)
 		return -ENOMEM;
 
@@ -1063,9 +1061,8 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
 	int err;
 
 	mbox->pfvf = pf;
-	pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
-				      WQ_UNBOUND | WQ_HIGHPRI |
-				      WQ_MEM_RECLAIM, 1);
+	pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
+					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!pf->mbox_wq)
 		return -ENOMEM;
 

@@ -297,9 +297,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
 	int err;
 
 	mbox->pfvf = vf;
-	vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
-				      WQ_UNBOUND | WQ_HIGHPRI |
-				      WQ_MEM_RECLAIM, 1);
+	vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox",
+					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!vf->mbox_wq)
 		return -ENOMEM;
 

@@ -1082,8 +1082,7 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
 	if (ret)
 		goto err;
 
-	qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event",
-					WQ_UNBOUND, 1);
+	qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0);
 	if (!qmi->event_wq) {
 		ath10k_err(ar, "failed to allocate workqueue\n");
 		ret = -EFAULT;

@@ -3256,8 +3256,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
 		return ret;
 	}
 
-	ab->qmi.event_wq = alloc_workqueue("ath11k_qmi_driver_event",
-					   WQ_UNBOUND, 1);
+	ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0);
 	if (!ab->qmi.event_wq) {
 		ath11k_err(ab, "failed to allocate workqueue\n");
 		return -EFAULT;

@@ -3056,8 +3056,7 @@ int ath12k_qmi_init_service(struct ath12k_base *ab)
 		return ret;
 	}
 
-	ab->qmi.event_wq = alloc_workqueue("ath12k_qmi_driver_event",
-					   WQ_UNBOUND, 1);
+	ab->qmi.event_wq = alloc_ordered_workqueue("ath12k_qmi_driver_event", 0);
 	if (!ab->qmi.event_wq) {
 		ath12k_err(ab, "failed to allocate workqueue\n");
 		return -EFAULT;

@@ -3576,7 +3576,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	init_waitqueue_head(&trans_pcie->imr_waitq);
 
 	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
-						   WQ_HIGHPRI | WQ_UNBOUND, 1);
+						   WQ_HIGHPRI | WQ_UNBOUND, 0);
 	if (!trans_pcie->rba.alloc_wq) {
 		ret = -ENOMEM;
 		goto out_free_trans;

@@ -3127,7 +3127,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 	priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s",
 						  WQ_HIGHPRI |
 						  WQ_MEM_RECLAIM |
-						  WQ_UNBOUND, 1, name);
+						  WQ_UNBOUND, 0, name);
 	if (!priv->dfs_cac_workqueue) {
 		mwifiex_dbg(adapter, ERROR, "cannot alloc DFS CAC queue\n");
 		ret = -ENOMEM;
@@ -3138,7 +3138,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
 	priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s",
 						      WQ_HIGHPRI | WQ_UNBOUND |
-						      WQ_MEM_RECLAIM, 1, name);
+						      WQ_MEM_RECLAIM, 0, name);
 	if (!priv->dfs_chan_sw_workqueue) {
 		mwifiex_dbg(adapter, ERROR, "cannot alloc DFS channel sw queue\n");
 		ret = -ENOMEM;

@@ -1547,7 +1547,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
 
 	adapter->workqueue =
 		alloc_workqueue("MWIFIEX_WORK_QUEUE",
-				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 	if (!adapter->workqueue)
 		goto err_kmalloc;
 
@@ -1557,7 +1557,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
 		adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE",
 							WQ_HIGHPRI |
 							WQ_MEM_RECLAIM |
-							WQ_UNBOUND, 1);
+							WQ_UNBOUND, 0);
 		if (!adapter->rx_workqueue)
 			goto err_kmalloc;
 		INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
@@ -1702,7 +1702,7 @@ mwifiex_add_card(void *card, struct completion *fw_done,
 
 	adapter->workqueue =
 		alloc_workqueue("MWIFIEX_WORK_QUEUE",
-				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 	if (!adapter->workqueue)
 		goto err_kmalloc;
 
@@ -1712,7 +1712,7 @@ mwifiex_add_card(void *card, struct completion *fw_done,
 		adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE",
 							WQ_HIGHPRI |
 							WQ_MEM_RECLAIM |
-							WQ_UNBOUND, 1);
+							WQ_UNBOUND, 0);
 		if (!adapter->rx_workqueue)
 			goto err_kmalloc;
 

@@ -1293,9 +1293,9 @@ int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
 	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
 		md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
 		md_ctrl->txq[i].worker =
-			alloc_workqueue("md_hif%d_tx%d_worker",
-					WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
-					1, md_ctrl->hif_id, i);
+			alloc_ordered_workqueue("md_hif%d_tx%d_worker",
+						WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
+						md_ctrl->hif_id, i);
 		if (!md_ctrl->txq[i].worker)
 			goto err_workqueue;
 
@@ -1306,9 +1306,10 @@ int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
 		md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
 		INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
 
-		md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker",
-							 WQ_UNBOUND | WQ_MEM_RECLAIM,
-							 1, md_ctrl->hif_id, i);
+		md_ctrl->rxq[i].worker =
+			alloc_ordered_workqueue("md_hif%d_rx%d_worker",
+						WQ_MEM_RECLAIM,
+						md_ctrl->hif_id, i);
 		if (!md_ctrl->rxq[i].worker)
 			goto err_workqueue;
 	}

@@ -618,8 +618,9 @@ int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
 		return ret;
 	}
 
-	txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM |
-				      (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index);
+	txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
+					      WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI),
+					      txq->index);
 	if (!txq->worker)
 		return -ENOMEM;
 

@@ -417,7 +417,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
 	INIT_WORK(&hostdata->main_task, NCR5380_main);
 	hostdata->work_q = alloc_workqueue("ncr5380_%d",
 				WQ_UNBOUND | WQ_MEM_RECLAIM,
-				1, instance->host_no);
+				0, instance->host_no);
 	if (!hostdata->work_q)
 		return -ENOMEM;
 

@@ -576,8 +576,8 @@ static void ioreq_resume(void)
 int acrn_ioreq_intr_setup(void)
 {
 	acrn_setup_intr_handler(ioreq_intr_handler);
-	ioreq_wq = alloc_workqueue("ioreq_wq",
-				   WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+	ioreq_wq = alloc_ordered_workqueue("ioreq_wq",
+					   WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!ioreq_wq) {
 		dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
 		acrn_remove_intr_handler();

@@ -363,7 +363,7 @@ static struct sock_mapping *pvcalls_new_active_socket(
 	map->data.in = map->bytes;
 	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);
 
-	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
+	map->ioworker.wq = alloc_ordered_workqueue("pvcalls_io", 0);
 	if (!map->ioworker.wq)
 		goto out;
 	atomic_set(&map->io, 1);
@@ -636,7 +636,7 @@ static int pvcalls_back_bind(struct xenbus_device *dev,
 	INIT_WORK(&map->register_work, __pvcalls_back_accept);
 	spin_lock_init(&map->copy_lock);
 
-	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
+	map->wq = alloc_ordered_workqueue("pvcalls_wq", 0);
 	if (!map->wq) {
 		ret = -ENOMEM;
 		goto out;

@@ -783,7 +783,7 @@ int qrtr_ns_init(void)
 		goto err_sock;
 	}
 
-	qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
+	qrtr_ns.workqueue = alloc_ordered_workqueue("qrtr_ns_handler", 0);
 	if (!qrtr_ns.workqueue) {
 		ret = -ENOMEM;
 		goto err_sock;

@@ -989,7 +989,7 @@ static int __init af_rxrpc_init(void)
 		goto error_call_jar;
 	}
 
-	rxrpc_workqueue = alloc_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+	rxrpc_workqueue = alloc_ordered_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!rxrpc_workqueue) {
 		pr_notice("Failed to allocate work queue\n");
 		goto error_work_queue;