dmaengine: idxd: Convert spinlock to mutex to lock evl workqueue
drain_workqueue() cannot be called safely in a spinlocked context because
it may reschedule the calling task. In a multi-task scenario, calling
queue_work() while drain_workqueue() is in progress triggers the Call
Trace below, since queuing new work on a draining workqueue is not
permitted.
Call Trace:
<TASK>
? __warn+0x7d/0x140
? __queue_work+0x2b2/0x440
? report_bug+0x1f8/0x200
? handle_bug+0x3c/0x70
? exc_invalid_op+0x18/0x70
? asm_exc_invalid_op+0x1a/0x20
? __queue_work+0x2b2/0x440
queue_work_on+0x28/0x30
idxd_misc_thread+0x303/0x5a0 [idxd]
? __schedule+0x369/0xb40
? __pfx_irq_thread_fn+0x10/0x10
? irq_thread+0xbc/0x1b0
irq_thread_fn+0x21/0x70
irq_thread+0x102/0x1b0
? preempt_count_add+0x74/0xa0
? __pfx_irq_thread_dtor+0x10/0x10
? __pfx_irq_thread+0x10/0x10
kthread+0x103/0x140
? __pfx_kthread+0x10/0x10
ret_from_fork+0x31/0x50
? __pfx_kthread+0x10/0x10
ret_from_fork_asm+0x1b/0x30
</TASK>
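For context, the WARN in the trace fires inside __queue_work(). A minimal sketch of the check, paraphrased from kernel/workqueue.c (the exact form varies by kernel version), shows why queuing on a draining workqueue is rejected:

	/* In __queue_work(): while a workqueue is draining, only work
	 * chained from the draining work itself may be queued; anything
	 * else trips the WARN_ON_ONCE seen in the trace above.
	 */
	if (unlikely(wq->flags & __WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;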
The current implementation uses a spinlock to protect the event log
workqueue, which leads to the Call Trace above due to potential task
rescheduling. To address the locking issue, convert the spinlock to a
mutex, allowing drain_workqueue() to be called in a safe, mutex-locked
context. This ensures proper synchronization when accessing the event
log workqueue, prevents the Call Trace, and improves the overall
robustness of the code.
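Condensed from the diff below (a sketch, not the full function), the key change in idxd_cdev_evl_drain_pasid() is both the lock type and the ordering of the drain relative to the unlock:

	/* Before: a spinlock cannot be held across a sleeping call, so the
	 * lock had to be dropped before drain_workqueue(), leaving a window
	 * in which the interrupt thread could queue new work mid-drain.
	 */
	spin_lock(&evl->lock);
	/* ... scan the event log for the PASID ... */
	spin_unlock(&evl->lock);
	drain_workqueue(wq->wq);

	/* After: a mutex may be held while sleeping, so the drain runs
	 * inside the critical section, serializing against queue_work()
	 * callers that take evl->lock first.
	 */
	mutex_lock(&evl->lock);
	/* ... scan the event log for the PASID ... */
	drain_workqueue(wq->wq);
	mutex_unlock(&evl->lock);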
Fixes: c40bd7d973 ("dmaengine: idxd: process user page faults for completion record")
Signed-off-by: Rex Zhang <rex.zhang@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
Reviewed-by: Lijun Pan <lijun.pan@intel.com>
Link: https://lore.kernel.org/r/20240404223949.2885604-1-fenghua.yu@intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
commit d5638de827 (parent 9140ce4787)
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
 	if (!evl)
 		return;
 
-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = status.tail;
 	h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
 		set_bit(h, evl->bmap);
 		h = (h + 1) % size;
 	}
-	spin_unlock(&evl->lock);
-
 	drain_workqueue(wq->wq);
+	mutex_unlock(&evl->lock);
 }
 
 static int idxd_cdev_release(struct inode *node, struct file *filep)
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
 	if (!evl || !evl->log)
 		return 0;
 
-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 
 	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
 		dump_event_entry(idxd, s, i, &count, processed);
 	}
 
-	spin_unlock(&evl->lock);
+	mutex_unlock(&evl->lock);
 	return 0;
 }
 
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
 		goto err_alloc;
 	}
 
-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	evl->log = addr;
 	evl->dma = dma_addr;
 	evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
 	gencfg.evl_en = 1;
 	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 
-	spin_unlock(&evl->lock);
+	mutex_unlock(&evl->lock);
 	return 0;
 
 err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
 	if (!gencfg.evl_en)
 		return;
 
-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	gencfg.evl_en = 0;
 	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
 	evl_dma = evl->dma;
 	evl->log = NULL;
 	evl->size = IDXD_EVL_SIZE_MIN;
-	spin_unlock(&evl->lock);
+	mutex_unlock(&evl->lock);
 
 	dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
 }
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
@@ -293,7 +293,7 @@ struct idxd_driver_data {
 
 struct idxd_evl {
 	/* Lock to protect event log access. */
-	spinlock_t lock;
+	struct mutex lock;
 	void *log;
 	dma_addr_t dma;
 	/* Total size of event log = number of entries * entry size. */
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
@@ -354,7 +354,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
 	if (!evl)
 		return -ENOMEM;
 
-	spin_lock_init(&evl->lock);
+	mutex_init(&evl->lock);
 	evl->size = IDXD_EVL_SIZE_MIN;
 
 	idxd_name = dev_name(idxd_confdev(idxd));
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
 	evl_status.bits = 0;
 	evl_status.int_pending = 1;
 
-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	/* Clear interrupt pending bit */
 	iowrite32(evl_status.bits_upper32,
 		  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
 
 	evl_status.head = h;
 	iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
-	spin_unlock(&evl->lock);
+	mutex_unlock(&evl->lock);
 }
 
 irqreturn_t idxd_misc_thread(int vec, void *data)