Commit ea4842ed authored by Dmitry Baryshkov

drm/msm/dpu: add helper to get IRQ-related data

In preparation for reworking IRQ indices, move irq_tbl access into a
separate helper.
Reviewed-by: Marijn Suijten <marijn.suijten@somainline.org>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Patchwork: https://patchwork.freedesktop.org/patch/550931/
Link: https://lore.kernel.org/r/20230802100426.4184892-5-dmitry.baryshkov@linaro.org
parent a6526483
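
For illustration, here is a minimal, self-contained userspace sketch of the pattern this patch applies: every read or write of irq_tbl[] is funneled through one small lookup helper, so a later change to how IRQ indices map to table entries only has to touch that helper. The names and struct layouts below (irq_entry, intr, get_entry, demo_cb) are simplified stand-ins invented for this sketch, not the driver's actual definitions; the real code is in the diff that follows.

/* Illustration only: a userspace mock-up of the lookup-helper pattern.
 * The real driver structures and names differ (see the diff below).
 */
#include <stdio.h>
#include <stdlib.h>

struct irq_entry {
	void (*cb)(void *arg);	/* registered callback, NULL when unused */
	void *arg;		/* opaque argument handed back to cb */
	int count;		/* how many times this interrupt fired */
};

struct intr {
	int total_irqs;
	struct irq_entry irq_tbl[];	/* flexible array member, sized at allocation */
};

/* Central accessor: every irq_tbl[] dereference goes through one place,
 * so a later rework of how indices map to entries only touches this helper.
 */
static struct irq_entry *get_entry(struct intr *intr, int irq_idx)
{
	return &intr->irq_tbl[irq_idx];
}

static void demo_cb(void *arg)
{
	printf("callback fired, arg=%s\n", (const char *)arg);
}

int main(void)
{
	int n = 4;
	static char msg[] = "hello";
	struct intr *intr = calloc(1, sizeof(*intr) + n * sizeof(struct irq_entry));
	struct irq_entry *entry;

	if (!intr)
		return 1;
	intr->total_irqs = n;

	/* Register: go through the helper instead of indexing irq_tbl[] directly. */
	entry = get_entry(intr, 2);
	entry->cb = demo_cb;
	entry->arg = msg;

	/* Dispatch: same helper, same single point of indexing. */
	entry = get_entry(intr, 2);
	if (entry->cb) {
		entry->count++;
		entry->cb(entry->arg);
	}

	free(intr);
	return 0;
}

The kernel patch itself introduces dpu_core_irq_get_entry() and struct dpu_hw_intr_entry for the same purpose:
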
@@ -206,6 +206,12 @@ static inline bool dpu_core_irq_is_valid(struct dpu_hw_intr *intr,
 	return irq_idx >= 0 && irq_idx < intr->total_irqs;
 }
 
+static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
+						 int irq_idx)
+{
+	return &intr->irq_tbl[irq_idx];
+}
+
 /**
  * dpu_core_irq_callback_handler - dispatch core interrupts
  * @dpu_kms: Pointer to DPU's KMS structure
@@ -213,17 +219,19 @@ static inline bool dpu_core_irq_is_valid(struct dpu_hw_intr *intr,
  */
 static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
 {
+	struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+
 	VERB("irq_idx=%d\n", irq_idx);
 
-	if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
+	if (!irq_entry->cb)
 		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
 
-	atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
+	atomic_inc(&irq_entry->count);
 
 	/*
 	 * Perform registered function callback
 	 */
-	dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg);
+	irq_entry->cb(irq_entry->arg);
 }
 
 irqreturn_t dpu_core_irq(struct msm_kms *kms)
@@ -510,6 +518,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 		void (*irq_cb)(void *arg),
 		void *irq_arg)
 {
+	struct dpu_hw_intr_entry *irq_entry;
 	unsigned long irq_flags;
 	int ret;
 
@@ -527,15 +536,16 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 
 	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
-	if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+	if (unlikely(WARN_ON(irq_entry->cb))) {
 		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
 		return -EBUSY;
 	}
 
 	trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
-	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
-	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+	irq_entry->arg = irq_arg;
+	irq_entry->cb = irq_cb;
 
 	ret = dpu_hw_intr_enable_irq_locked(
 			dpu_kms->hw_intr,
@@ -552,6 +562,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 
 int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
 {
+	struct dpu_hw_intr_entry *irq_entry;
 	unsigned long irq_flags;
 	int ret;
 
@@ -570,8 +581,9 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
 		DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
 					irq_idx, ret);
 
-	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
-	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+	irq_entry->cb = NULL;
+	irq_entry->arg = NULL;
 
 	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
@@ -584,14 +596,16 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
 static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 {
 	struct dpu_kms *dpu_kms = s->private;
+	struct dpu_hw_intr_entry *irq_entry;
 	unsigned long irq_flags;
 	int i, irq_count;
 	void *cb;
 
 	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
 		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
-		irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
-		cb = dpu_kms->hw_intr->irq_tbl[i].cb;
+		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+		irq_count = atomic_read(&irq_entry->count);
+		cb = irq_entry->cb;
 		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
 		if (irq_count || cb)
@@ -614,6 +628,7 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
 void dpu_core_irq_preinstall(struct msm_kms *kms)
 {
 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct dpu_hw_intr_entry *irq_entry;
 	int i;
 
 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -621,22 +636,27 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
 	dpu_disable_all_irqs(dpu_kms);
 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
-	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
-		atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
+	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+		atomic_set(&irq_entry->count, 0);
+	}
 }
 
 void dpu_core_irq_uninstall(struct msm_kms *kms)
 {
 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct dpu_hw_intr_entry *irq_entry;
 	int i;
 
 	if (!dpu_kms->hw_intr)
 		return;
 
 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
-	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
-		if (dpu_kms->hw_intr->irq_tbl[i].cb)
+	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+		if (irq_entry->cb)
 			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+	}
 
 	dpu_clear_irqs(dpu_kms);
 	dpu_disable_all_irqs(dpu_kms);
@@ -38,6 +38,12 @@ enum dpu_hw_intr_reg {
 #define DPU_IRQ_IDX(reg_idx, offset)	(reg_idx * 32 + offset)
 
+struct dpu_hw_intr_entry {
+	void (*cb)(void *arg);
+	void *arg;
+	atomic_t count;
+};
+
 /**
  * struct dpu_hw_intr: hw interrupts handling data structure
  * @hw: virtual address mapping
@@ -57,11 +63,7 @@ struct dpu_hw_intr {
 	unsigned long irq_mask;
 	const struct dpu_intr_reg *intr_set;
 
-	struct {
-		void (*cb)(void *arg);
-		void *arg;
-		atomic_t count;
-	} irq_tbl[];
+	struct dpu_hw_intr_entry irq_tbl[];
 };
 
 /**