Commit 56442d83 authored by Andrzej Hajda, committed by Inki Dae

drm/exynos/fimc: simplify buffer queuing

The patch removes redundant checks, redundant HW reads
and simplifies code.
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Reviewed-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
parent c7b3014b
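The simplification hinges on treating EXYNOS_CIFCNTSEQ as a plain bitmask of enqueued destination buffers: the per-buffer bit is set or cleared directly instead of going through a switch statement, and the number of queued buffers is taken with hweight32() from the value just written rather than re-reading the register through the now-removed fimc_dst_get_buf_count(). Below is a minimal userspace sketch of that bookkeeping, not driver code: the register is stubbed with a variable, __builtin_popcount() stands in for hweight32(), and the FIMC_BUF_START/FIMC_BUF_STOP thresholds are assumed values used only for illustration.

/* Sketch of the buffer-sequence bookkeeping this patch converges on. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define FIMC_BUF_START	4	/* assumed threshold for enabling the irq */
#define FIMC_BUF_STOP	2	/* assumed threshold for disabling the irq */

static uint32_t cifcntseq;	/* stand-in for the EXYNOS_CIFCNTSEQ register */

static void set_buf_seq(uint32_t buf_id, bool enqueue)
{
	uint32_t cfg = cifcntseq;
	uint32_t buf_num;

	/* toggle the per-buffer bit directly, no switch statement needed */
	if (enqueue)
		cfg |= (1u << buf_id);
	else
		cfg &= ~(1u << buf_id);

	cifcntseq = cfg;			/* fimc_write() in the driver */

	/* count buffers from the value just written, no extra register read */
	buf_num = __builtin_popcount(cfg);	/* hweight32() in the kernel */

	if (enqueue && buf_num >= FIMC_BUF_START)
		printf("enable irq (%u buffers queued)\n", buf_num);
	else if (!enqueue && buf_num <= FIMC_BUF_STOP)
		printf("disable irq (%u buffers queued)\n", buf_num);
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < 5; i++)
		set_buf_seq(i, true);	/* irq enabled once enough buffers queue up */
	set_buf_seq(0, false);
	set_buf_seq(1, false);
	set_buf_seq(2, false);		/* irq disabled once the count drops low enough */
	return 0;
}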
@@ -1119,67 +1119,34 @@ static int fimc_dst_set_size(struct device *dev, int swap,
 	return 0;
 }
 
-static int fimc_dst_get_buf_count(struct fimc_context *ctx)
-{
-	u32 cfg, buf_num;
-
-	cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
-
-	buf_num = hweight32(cfg);
-
-	DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
-
-	return buf_num;
-}
-
-static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
 		enum drm_exynos_ipp_buf_type buf_type)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	bool enable;
-	u32 cfg;
-	u32 mask = 0x00000001 << buf_id;
-	int ret = 0;
 	unsigned long flags;
+	u32 buf_num;
+	u32 cfg;
 
 	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
 
 	spin_lock_irqsave(&ctx->lock, flags);
 
-	/* mask register set */
 	cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
 
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		enable = true;
-		break;
-	case IPP_BUF_DEQUEUE:
-		enable = false;
-		break;
-	default:
-		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
-		ret = -EINVAL;
-		goto err_unlock;
-	}
+	if (buf_type == IPP_BUF_ENQUEUE)
+		cfg |= (1 << buf_id);
+	else
+		cfg &= ~(1 << buf_id);
 
-	/* sequence id */
-	cfg &= ~mask;
-	cfg |= (enable << buf_id);
 	fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
 
-	/* interrupt enable */
-	if (buf_type == IPP_BUF_ENQUEUE &&
-	    fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
-		fimc_mask_irq(ctx, true);
+	buf_num = hweight32(cfg);
 
-	/* interrupt disable */
-	if (buf_type == IPP_BUF_DEQUEUE &&
-	    fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
+	if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
+		fimc_mask_irq(ctx, true);
+	else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
 		fimc_mask_irq(ctx, false);
 
-err_unlock:
 	spin_unlock_irqrestore(&ctx->lock, flags);
-	return ret;
 }
 
 static int fimc_dst_set_addr(struct device *dev,
@@ -1237,7 +1204,9 @@ static int fimc_dst_set_addr(struct device *dev,
 		break;
 	}
 
-	return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+	fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+
+	return 0;
 }
 
 static struct exynos_drm_ipp_ops fimc_dst_ops = {
@@ -1288,10 +1257,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 	DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
 
-	if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
-		DRM_ERROR("failed to dequeue.\n");
-		return IRQ_HANDLED;
-	}
+	fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
 
 	event_work->ippdrv = ippdrv;
 	event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;