Commit e6784f9e authored by Benoit Parrot, committed by Mauro Carvalho Chehab

media: am437x-vpfe: Rework ISR routine for clarity

Make the ISR code simpler to follow by removing goto and
relocating/eliminating duplicate spinlock accesses.
Signed-off-by: Benoit Parrot <bparrot@ti.com>
Acked-by: Lad Prabhakar <prabhakar.csengg@gmail.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
parent 47c7bcfd
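
For readers skimming the diff, the heart of the change is that vpfe_schedule_next_buffer() now takes vpfe->dma_queue_lock and checks for an empty queue itself, so the interrupt paths can call it unconditionally instead of repeating the lock/check/unlock sequence, and the interlaced handling moves into its own helper so the goto disappears. The short stand-alone C sketch below is not part of the commit; it shows the same locking pattern with pthread spinlocks and a hand-rolled list standing in for the kernel primitives, and every name in it (struct dev, schedule_next_buffer, isr_vdint1) is invented for illustration. Build with cc -pthread to try it.

/* illustration.c -- not part of the commit; a user-space sketch of the
 * "push the lock and the empty-queue check into the helper" pattern.
 */
#include <pthread.h>
#include <stdio.h>

struct buf {
	struct buf *next;
};

struct dev {
	pthread_spinlock_t lock;   /* stands in for vpfe->dma_queue_lock */
	struct buf *queue;         /* stands in for vpfe->dma_queue */
	struct buf *cur_frm;
	struct buf *next_frm;
};

/* After the rework the helper owns the lock and the empty-queue check,
 * so interrupt-side callers no longer duplicate either.
 */
static void schedule_next_buffer(struct dev *d)
{
	struct buf *b;

	pthread_spin_lock(&d->lock);
	if (!d->queue) {               /* nothing queued: keep current buffer */
		pthread_spin_unlock(&d->lock);
		return;
	}
	b = d->queue;
	d->queue = b->next;            /* list_del() equivalent */
	d->next_frm = b;
	pthread_spin_unlock(&d->lock);

	/* program the next capture address outside the lock */
	printf("programming DMA address for buffer %p\n", (void *)b);
}

/* The caller shrinks to a single unconditional call, mirroring the
 * simplified VDINT1 branch in vpfe_isr() in the diff below.
 */
static void isr_vdint1(struct dev *d)
{
	if (d->cur_frm == d->next_frm)
		schedule_next_buffer(d);
}

int main(void)
{
	struct buf b0 = { .next = NULL };
	struct dev d = { .queue = &b0, .cur_frm = NULL, .next_frm = NULL };

	pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
	isr_vdint1(&d);                /* schedules b0 */
	d.cur_frm = d.next_frm;        /* pretend the frame completed */
	isr_vdint1(&d);                /* queue now empty: helper bails out quietly */
	pthread_spin_destroy(&d.lock);
	return 0;
}
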
@@ -1233,22 +1233,29 @@ static int vpfe_open(struct file *file)
  * This function will get next buffer from the dma queue and
  * set the buffer address in the vpfe register for capture.
  * the buffer is marked active
+ *
+ * Assumes caller is holding vpfe->dma_queue_lock already
  */
-static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
 {
+	dma_addr_t addr;
+
+	spin_lock(&vpfe->dma_queue_lock);
+	if (list_empty(&vpfe->dma_queue)) {
+		spin_unlock(&vpfe->dma_queue_lock);
+		return;
+	}
+
 	vpfe->next_frm = list_entry(vpfe->dma_queue.next,
 				    struct vpfe_cap_buffer, list);
 	list_del(&vpfe->next_frm->list);
+	spin_unlock(&vpfe->dma_queue_lock);
 
-	vpfe_set_sdr_addr(&vpfe->ccdc,
-			  vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
+	addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
+	vpfe_set_sdr_addr(&vpfe->ccdc, addr);
 }
 
 static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
 {
-	unsigned long addr;
+	dma_addr_t addr;
 
 	addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
 	       vpfe->field_off;
@@ -1273,6 +1280,55 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
 	vpfe->cur_frm = vpfe->next_frm;
 }
 
+static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe,
+				       enum v4l2_field field)
+{
+	int fid;
+
+	/* interlaced or TB capture check which field
+	 * we are in hardware
+	 */
+	fid = vpfe_ccdc_getfid(&vpfe->ccdc);
+
+	/* switch the software maintained field id */
+	vpfe->field ^= 1;
+	if (fid == vpfe->field) {
+		/* we are in-sync here,continue */
+		if (fid == 0) {
+			/*
+			 * One frame is just being captured. If the
+			 * next frame is available, release the
+			 * current frame and move on
+			 */
+			if (vpfe->cur_frm != vpfe->next_frm)
+				vpfe_process_buffer_complete(vpfe);
+
+			/*
+			 * based on whether the two fields are stored
+			 * interleave or separately in memory,
+			 * reconfigure the CCDC memory address
+			 */
+			if (field == V4L2_FIELD_SEQ_TB)
+				vpfe_schedule_bottom_field(vpfe);
+		} else {
+			/*
+			 * if one field is just being captured configure
+			 * the next frame get the next frame from the empty
+			 * queue if no frame is available hold on to the
+			 * current buffer
+			 */
+			if (vpfe->cur_frm == vpfe->next_frm)
+				vpfe_schedule_next_buffer(vpfe);
+		}
+	} else if (fid == 0) {
+		/*
+		 * out of sync. Recover from any hardware out-of-sync.
+		 * May loose one frame
+		 */
+		vpfe->field = fid;
+	}
+}
+
 /*
  * vpfe_isr : ISR handler for vpfe capture (VINT0)
  * @irq: irq number
@@ -1284,76 +1340,24 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
 static irqreturn_t vpfe_isr(int irq, void *dev)
 {
 	struct vpfe_device *vpfe = (struct vpfe_device *)dev;
-	enum v4l2_field field;
+	enum v4l2_field field = vpfe->fmt.fmt.pix.field;
 	int intr_status;
-	int fid;
 
 	intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
 	if (intr_status & VPFE_VDINT0) {
-		field = vpfe->fmt.fmt.pix.field;
-
 		if (field == V4L2_FIELD_NONE) {
-			/* handle progressive frame capture */
 			if (vpfe->cur_frm != vpfe->next_frm)
 				vpfe_process_buffer_complete(vpfe);
-			goto next_intr;
-		}
-
-		/* interlaced or TB capture check which field
-		   we are in hardware */
-		fid = vpfe_ccdc_getfid(&vpfe->ccdc);
-
-		/* switch the software maintained field id */
-		vpfe->field ^= 1;
-		if (fid == vpfe->field) {
-			/* we are in-sync here,continue */
-			if (fid == 0) {
-				/*
-				 * One frame is just being captured. If the
-				 * next frame is available, release the
-				 * current frame and move on
-				 */
-				if (vpfe->cur_frm != vpfe->next_frm)
-					vpfe_process_buffer_complete(vpfe);
-
-				/*
-				 * based on whether the two fields are stored
-				 * interleave or separately in memory,
-				 * reconfigure the CCDC memory address
-				 */
-				if (field == V4L2_FIELD_SEQ_TB)
-					vpfe_schedule_bottom_field(vpfe);
-				goto next_intr;
-			}
-			/*
-			 * if one field is just being captured configure
-			 * the next frame get the next frame from the empty
-			 * queue if no frame is available hold on to the
-			 * current buffer
-			 */
-			spin_lock(&vpfe->dma_queue_lock);
-			if (!list_empty(&vpfe->dma_queue) &&
-			    vpfe->cur_frm == vpfe->next_frm)
-				vpfe_schedule_next_buffer(vpfe);
-			spin_unlock(&vpfe->dma_queue_lock);
-		} else if (fid == 0) {
-			/*
-			 * out of sync. Recover from any hardware out-of-sync.
-			 * May loose one frame
-			 */
-			vpfe->field = fid;
+		} else {
+			vpfe_handle_interlaced_irq(vpfe, field);
 		}
 	}
 
-next_intr:
 	if (intr_status & VPFE_VDINT1) {
-		spin_lock(&vpfe->dma_queue_lock);
-		if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
-		    !list_empty(&vpfe->dma_queue) &&
+		if (field == V4L2_FIELD_NONE &&
 		    vpfe->cur_frm == vpfe->next_frm)
 			vpfe_schedule_next_buffer(vpfe);
-		spin_unlock(&vpfe->dma_queue_lock);
 	}
 
 	vpfe_clear_intr(&vpfe->ccdc, intr_status);