mirror of https://github.com/edk2-porting/linux-next.git
media: am437x-vpfe: Rework ISR routine for clarity
Make the ISR code simpler to follow by removing the goto statements and by
relocating or eliminating duplicate spinlock accesses.

Signed-off-by: Benoit Parrot <bparrot@ti.com>
Acked-by: Lad Prabhakar <prabhakar.csengg@gmail.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
parent 47c7bcfdb3
commit e6784f9e4e
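As a reading aid before the hunks: after this patch vpfe_isr() no longer jumps to a
next_intr label. The progressive (V4L2_FIELD_NONE) case is handled inline, the
interlaced/SEQ_TB bookkeeping moves into a new vpfe_handle_interlaced_irq() helper, and
the VDINT1 path no longer takes the queue lock itself. The sketch below is assembled
from the hunks that follow; indentation is approximated, and the trailing return is the
unchanged tail of the function, outside the diff.

static irqreturn_t vpfe_isr(int irq, void *dev)
{
        struct vpfe_device *vpfe = (struct vpfe_device *)dev;
        enum v4l2_field field = vpfe->fmt.fmt.pix.field;
        int intr_status;

        intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);

        if (intr_status & VPFE_VDINT0) {
                if (field == V4L2_FIELD_NONE) {
                        /* progressive capture: release the finished frame */
                        if (vpfe->cur_frm != vpfe->next_frm)
                                vpfe_process_buffer_complete(vpfe);
                } else {
                        /* interlaced/TB handling now lives in its own helper,
                         * replacing the old goto next_intr paths
                         */
                        vpfe_handle_interlaced_irq(vpfe, field);
                }
        }

        if (intr_status & VPFE_VDINT1) {
                /* vpfe_schedule_next_buffer() now takes the queue lock itself */
                if (field == V4L2_FIELD_NONE &&
                    vpfe->cur_frm == vpfe->next_frm)
                        vpfe_schedule_next_buffer(vpfe);
        }

        vpfe_clear_intr(&vpfe->ccdc, intr_status);

        return IRQ_HANDLED;     /* unchanged tail, outside the hunks shown */
}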
@@ -1233,22 +1233,29 @@ unlock:
  * This function will get next buffer from the dma queue and
  * set the buffer address in the vpfe register for capture.
  * the buffer is marked active
- *
- * Assumes caller is holding vpfe->dma_queue_lock already
  */
-static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
 {
+        dma_addr_t addr;
+
+        spin_lock(&vpfe->dma_queue_lock);
+        if (list_empty(&vpfe->dma_queue)) {
+                spin_unlock(&vpfe->dma_queue_lock);
+                return;
+        }
+
         vpfe->next_frm = list_entry(vpfe->dma_queue.next,
                                     struct vpfe_cap_buffer, list);
         list_del(&vpfe->next_frm->list);
+        spin_unlock(&vpfe->dma_queue_lock);
 
-        vpfe_set_sdr_addr(&vpfe->ccdc,
-                          vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
+        addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
+        vpfe_set_sdr_addr(&vpfe->ccdc, addr);
 }
 
 static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
 {
-        unsigned long addr;
+        dma_addr_t addr;
 
         addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
                                         vpfe->field_off;
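Assembled from the hunk above (indentation approximated): vpfe_schedule_next_buffer()
now takes vpfe->dma_queue_lock itself and returns early when the queue is empty, so
callers no longer need to hold the lock or re-check the list before calling it.

static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
{
        dma_addr_t addr;

        /* the helper now owns the queue lock instead of its callers */
        spin_lock(&vpfe->dma_queue_lock);
        if (list_empty(&vpfe->dma_queue)) {
                /* nothing queued: keep DMA pointing at the current buffer */
                spin_unlock(&vpfe->dma_queue_lock);
                return;
        }

        vpfe->next_frm = list_entry(vpfe->dma_queue.next,
                                    struct vpfe_cap_buffer, list);
        list_del(&vpfe->next_frm->list);
        spin_unlock(&vpfe->dma_queue_lock);

        addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
        vpfe_set_sdr_addr(&vpfe->ccdc, addr);
}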
@@ -1273,35 +1280,14 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
         vpfe->cur_frm = vpfe->next_frm;
 }
 
-/*
- * vpfe_isr : ISR handler for vpfe capture (VINT0)
- * @irq: irq number
- * @dev_id: dev_id ptr
- *
- * It changes status of the captured buffer, takes next buffer from the queue
- * and sets its address in VPFE registers
- */
-static irqreturn_t vpfe_isr(int irq, void *dev)
+static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe,
+                                       enum v4l2_field field)
 {
-        struct vpfe_device *vpfe = (struct vpfe_device *)dev;
-        enum v4l2_field field;
-        int intr_status;
         int fid;
 
-        intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
-
-        if (intr_status & VPFE_VDINT0) {
-                field = vpfe->fmt.fmt.pix.field;
-
-                if (field == V4L2_FIELD_NONE) {
-                        /* handle progressive frame capture */
-                        if (vpfe->cur_frm != vpfe->next_frm)
-                                vpfe_process_buffer_complete(vpfe);
-                        goto next_intr;
-                }
-
         /* interlaced or TB capture check which field
-           we are in hardware */
+         * we are in hardware
+         */
         fid = vpfe_ccdc_getfid(&vpfe->ccdc);
 
         /* switch the software maintained field id */
@@ -1316,6 +1302,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev)
                         */
                        if (vpfe->cur_frm != vpfe->next_frm)
                                vpfe_process_buffer_complete(vpfe);
+
                        /*
                         * based on whether the two fields are stored
                         * interleave or separately in memory,
@@ -1323,20 +1310,16 @@ static irqreturn_t vpfe_isr(int irq, void *dev)
                         */
                        if (field == V4L2_FIELD_SEQ_TB)
                                vpfe_schedule_bottom_field(vpfe);
-
-                        goto next_intr;
-                }
+                } else {
                        /*
                         * if one field is just being captured configure
                         * the next frame get the next frame from the empty
                         * queue if no frame is available hold on to the
                         * current buffer
                         */
-                        spin_lock(&vpfe->dma_queue_lock);
-                        if (!list_empty(&vpfe->dma_queue) &&
-                            vpfe->cur_frm == vpfe->next_frm)
+                        if (vpfe->cur_frm == vpfe->next_frm)
                                vpfe_schedule_next_buffer(vpfe);
-                        spin_unlock(&vpfe->dma_queue_lock);
+                }
         } else if (fid == 0) {
                 /*
                  * out of sync. Recover from any hardware out-of-sync.
@@ -1344,16 +1327,37 @@ static irqreturn_t vpfe_isr(int irq, void *dev)
                  */
                 vpfe->field = fid;
         }
+}
+
+/*
+ * vpfe_isr : ISR handler for vpfe capture (VINT0)
+ * @irq: irq number
+ * @dev_id: dev_id ptr
+ *
+ * It changes status of the captured buffer, takes next buffer from the queue
+ * and sets its address in VPFE registers
+ */
+static irqreturn_t vpfe_isr(int irq, void *dev)
+{
+        struct vpfe_device *vpfe = (struct vpfe_device *)dev;
+        enum v4l2_field field = vpfe->fmt.fmt.pix.field;
+        int intr_status;
+
+        intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
+
+        if (intr_status & VPFE_VDINT0) {
+                if (field == V4L2_FIELD_NONE) {
+                        if (vpfe->cur_frm != vpfe->next_frm)
+                                vpfe_process_buffer_complete(vpfe);
+                } else {
+                        vpfe_handle_interlaced_irq(vpfe, field);
+                }
         }
 
-next_intr:
         if (intr_status & VPFE_VDINT1) {
-                spin_lock(&vpfe->dma_queue_lock);
-                if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
-                    !list_empty(&vpfe->dma_queue) &&
+                if (field == V4L2_FIELD_NONE &&
                     vpfe->cur_frm == vpfe->next_frm)
                         vpfe_schedule_next_buffer(vpfe);
-                spin_unlock(&vpfe->dma_queue_lock);
         }
 
         vpfe_clear_intr(&vpfe->ccdc, intr_status);