dmaengine: qcom_hidma: make pending_tre_count atomic
Getting ready for the MSI interrupts. The pending_tre_count is used in the interrupt handler to make sure all outstanding requests are serviced.

The driver will allocate 11 MSI interrupts, and each MSI interrupt can be assigned to a different CPU. The MSI handlers share the same handler code but act on different cause bits, so they can execute in parallel, creating a race condition on shared variables. Make this variable atomic so that it can be updated safely from multiple processor contexts.

Signed-off-by: Sinan Kaya <okaya@codeaurora.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
commit bdcfddfd74
parent fc737969f6
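For context, here is a minimal standalone sketch of the pattern the patch adopts; it is not part of the patch, and the helper names (sketch_submit, sketch_complete) are hypothetical. It uses the kernel's atomic API exactly as the hunks below do: atomic_dec_return() decrements and returns the new value in a single atomic step, so concurrent MSI handlers cannot both observe the same intermediate count.

#include <linux/atomic.h>

static atomic_t pending = ATOMIC_INIT(0);

/* Submission side: one more outstanding TRE. */
static void sketch_submit(void)
{
	atomic_inc(&pending);
}

/*
 * Completion side, potentially running on several CPUs at once.
 * The decrement and the underflow check happen as one atomic
 * operation, unlike "count--; if (count < 0)" on a plain s32,
 * where another CPU can modify the counter between the two steps.
 */
static void sketch_complete(void)
{
	if (atomic_dec_return(&pending) < 0)
		atomic_set(&pending, 0);	/* clamp on mismatch */
}

This is why the hidma_post_completed() hunk below folds the decrement and the underflow test into a single atomic_dec_return() call rather than keeping them as two separate statements.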
@@ -59,7 +59,7 @@ struct hidma_lldev {
 	void __iomem *evca;		/* Event Channel address    */
 	struct hidma_tre
 		**pending_tre_list;	/* Pointers to pending TREs */
-	s32 pending_tre_count;		/* Number of TREs pending   */
+	atomic_t pending_tre_count;	/* Number of TREs pending   */
 
 	void *tre_ring;			/* TRE ring                 */
 	dma_addr_t tre_dma;		/* TRE ring to be shared with HW */
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
 	seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
 	seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
 	seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
-	seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+	seq_printf(s, "pending_tre_count=%d\n",
+		   atomic_read(&lldev->pending_tre_count));
 	seq_printf(s, "evca=%p\n", lldev->evca);
 	seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
 	seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
@@ -218,10 +218,9 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
 	 * Keep track of pending TREs that SW is expecting to receive
 	 * from HW. We got one now. Decrement our counter.
 	 */
-	lldev->pending_tre_count--;
-	if (lldev->pending_tre_count < 0) {
+	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
 		dev_warn(lldev->dev, "tre count mismatch on completion");
-		lldev->pending_tre_count = 0;
+		atomic_set(&lldev->pending_tre_count, 0);
 	}
 
 	spin_unlock_irqrestore(&lldev->lock, flags);
@@ -328,7 +327,7 @@ void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
 	u32 tre_read_off;
 
 	tre_iterator = lldev->tre_processed_off;
-	while (lldev->pending_tre_count) {
+	while (atomic_read(&lldev->pending_tre_count)) {
 		if (hidma_post_completed(lldev, tre_iterator, err_info,
 					 err_code))
 			break;
@@ -555,7 +554,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
 	tre->err_code = 0;
 	tre->err_info = 0;
 	tre->queued = 1;
-	lldev->pending_tre_count++;
+	atomic_inc(&lldev->pending_tre_count);
 	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
 					% lldev->tre_ring_size;
 	spin_unlock_irqrestore(&lldev->lock, flags);
@@ -650,7 +649,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
 	u32 val;
 	u32 nr_tres = lldev->nr_tres;
 
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_processed_off = 0;
 	lldev->evre_processed_off = 0;
 	lldev->tre_write_offset = 0;
@@ -831,7 +830,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 	tasklet_kill(&lldev->task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_write_offset = 0;
 
 	rc = hidma_ll_reset(lldev);