Commit a10234fd authored by Tvrtko Ursulin

drm/i915: Partial abandonment of legacy DRM logging macros

Convert some usages of legacy DRM logging macros into versions which tell
us on which device the events have occurred.

v2:
 * Don't have struct drm_device as local. (Jani, Ville)

v3:
 * Store gt, not i915, in workaround list. (John)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221109104633.2579245-1-tvrtko.ursulin@linux.intel.com
parent 8c949515
...@@ -546,7 +546,7 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) ...@@ -546,7 +546,7 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
} }
if (intel_engine_uses_guc(master)) { if (intel_engine_uses_guc(master)) {
DRM_DEBUG("bonding extension not supported with GuC submission"); drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
return -ENODEV; return -ENODEV;
} }
......
...@@ -2148,7 +2148,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) ...@@ -2148,7 +2148,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
return err; return err;
} }
static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) static int i915_gem_check_execbuffer(struct drm_i915_private *i915,
struct drm_i915_gem_execbuffer2 *exec)
{ {
if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS) if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
return -EINVAL; return -EINVAL;
...@@ -2161,7 +2162,7 @@ static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) ...@@ -2161,7 +2162,7 @@ static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
} }
if (exec->DR4 == 0xffffffff) { if (exec->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); drm_dbg(&i915->drm, "UXA submitting garbage DR4, fixing up\n");
exec->DR4 = 0; exec->DR4 = 0;
} }
if (exec->DR1 || exec->DR4) if (exec->DR1 || exec->DR4)
...@@ -2799,7 +2800,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb, ...@@ -2799,7 +2800,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
syncobj = drm_syncobj_find(eb->file, user_fence.handle); syncobj = drm_syncobj_find(eb->file, user_fence.handle);
if (!syncobj) { if (!syncobj) {
DRM_DEBUG("Invalid syncobj handle provided\n"); drm_dbg(&eb->i915->drm,
"Invalid syncobj handle provided\n");
return -ENOENT; return -ENOENT;
} }
...@@ -2807,7 +2809,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb, ...@@ -2807,7 +2809,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
if (!fence && user_fence.flags && if (!fence && user_fence.flags &&
!(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
DRM_DEBUG("Syncobj handle has no fence\n"); drm_dbg(&eb->i915->drm,
"Syncobj handle has no fence\n");
drm_syncobj_put(syncobj); drm_syncobj_put(syncobj);
return -EINVAL; return -EINVAL;
} }
...@@ -2816,7 +2819,9 @@ add_timeline_fence_array(struct i915_execbuffer *eb, ...@@ -2816,7 +2819,9 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
err = dma_fence_chain_find_seqno(&fence, point); err = dma_fence_chain_find_seqno(&fence, point);
if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
DRM_DEBUG("Syncobj handle missing requested point %llu\n", point); drm_dbg(&eb->i915->drm,
"Syncobj handle missing requested point %llu\n",
point);
dma_fence_put(fence); dma_fence_put(fence);
drm_syncobj_put(syncobj); drm_syncobj_put(syncobj);
return err; return err;
...@@ -2842,7 +2847,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb, ...@@ -2842,7 +2847,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
* 0) would break the timeline. * 0) would break the timeline.
*/ */
if (user_fence.flags & I915_EXEC_FENCE_WAIT) { if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
DRM_DEBUG("Trying to wait & signal the same timeline point.\n"); drm_dbg(&eb->i915->drm,
"Trying to wait & signal the same timeline point.\n");
dma_fence_put(fence); dma_fence_put(fence);
drm_syncobj_put(syncobj); drm_syncobj_put(syncobj);
return -EINVAL; return -EINVAL;
...@@ -2913,14 +2919,16 @@ static int add_fence_array(struct i915_execbuffer *eb) ...@@ -2913,14 +2919,16 @@ static int add_fence_array(struct i915_execbuffer *eb)
syncobj = drm_syncobj_find(eb->file, user_fence.handle); syncobj = drm_syncobj_find(eb->file, user_fence.handle);
if (!syncobj) { if (!syncobj) {
DRM_DEBUG("Invalid syncobj handle provided\n"); drm_dbg(&eb->i915->drm,
"Invalid syncobj handle provided\n");
return -ENOENT; return -ENOENT;
} }
if (user_fence.flags & I915_EXEC_FENCE_WAIT) { if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
fence = drm_syncobj_fence_get(syncobj); fence = drm_syncobj_fence_get(syncobj);
if (!fence) { if (!fence) {
DRM_DEBUG("Syncobj handle has no fence\n"); drm_dbg(&eb->i915->drm,
"Syncobj handle has no fence\n");
drm_syncobj_put(syncobj); drm_syncobj_put(syncobj);
return -EINVAL; return -EINVAL;
} }
...@@ -3515,7 +3523,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, ...@@ -3515,7 +3523,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
err = i915_gem_check_execbuffer(args); err = i915_gem_check_execbuffer(i915, args);
if (err) if (err)
return err; return err;
......
...@@ -3921,6 +3921,7 @@ static struct intel_context * ...@@ -3921,6 +3921,7 @@ static struct intel_context *
execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
unsigned long flags) unsigned long flags)
{ {
struct drm_i915_private *i915 = siblings[0]->i915;
struct virtual_engine *ve; struct virtual_engine *ve;
unsigned int n; unsigned int n;
int err; int err;
...@@ -3929,7 +3930,7 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, ...@@ -3929,7 +3930,7 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
if (!ve) if (!ve)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ve->base.i915 = siblings[0]->i915; ve->base.i915 = i915;
ve->base.gt = siblings[0]->gt; ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore; ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1; ve->base.id = -1;
...@@ -3988,8 +3989,9 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, ...@@ -3988,8 +3989,9 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
GEM_BUG_ON(!is_power_of_2(sibling->mask)); GEM_BUG_ON(!is_power_of_2(sibling->mask));
if (sibling->mask & ve->base.mask) { if (sibling->mask & ve->base.mask) {
DRM_DEBUG("duplicate %s entry in load balancer\n", drm_dbg(&i915->drm,
sibling->name); "duplicate %s entry in load balancer\n",
sibling->name);
err = -EINVAL; err = -EINVAL;
goto err_put; goto err_put;
} }
...@@ -4023,8 +4025,9 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, ...@@ -4023,8 +4025,9 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
*/ */
if (ve->base.class != OTHER_CLASS) { if (ve->base.class != OTHER_CLASS) {
if (ve->base.class != sibling->class) { if (ve->base.class != sibling->class) {
DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n", drm_dbg(&i915->drm,
sibling->class, ve->base.class); "invalid mixing of engine class, sibling %d, already %d\n",
sibling->class, ve->base.class);
err = -EINVAL; err = -EINVAL;
goto err_put; goto err_put;
} }
......
...@@ -816,8 +816,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, ...@@ -816,8 +816,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
if (obj->bit_17 == NULL) { if (obj->bit_17 == NULL) {
obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL); obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
if (obj->bit_17 == NULL) { if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 " drm_err(&to_i915(obj->base.dev)->drm,
"record\n"); "Failed to allocate memory for bit 17 record\n");
return; return;
} }
} }
......
...@@ -190,7 +190,7 @@ int intel_gt_init_hw(struct intel_gt *gt) ...@@ -190,7 +190,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
ret = i915_ppgtt_init_hw(gt); ret = i915_ppgtt_init_hw(gt);
if (ret) { if (ret) {
DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); drm_err(&i915->drm, "Enabling PPGTT failed (%d)\n", ret);
goto out; goto out;
} }
...@@ -262,7 +262,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt, ...@@ -262,7 +262,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
* some errors might have become stuck, * some errors might have become stuck,
* mask them. * mask them.
*/ */
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); drm_dbg(&gt->i915->drm, "EIR stuck: 0x%08x, masking\n", eir);
rmw_set(uncore, EMR, eir); rmw_set(uncore, EMR, eir);
intel_uncore_write(uncore, GEN2_IIR, intel_uncore_write(uncore, GEN2_IIR,
I915_MASTER_ERROR_INTERRUPT); I915_MASTER_ERROR_INTERRUPT);
......
...@@ -44,8 +44,9 @@ gen11_gt_engine_identity(struct intel_gt *gt, ...@@ -44,8 +44,9 @@ gen11_gt_engine_identity(struct intel_gt *gt,
!time_after32(local_clock() >> 10, timeout_ts)); !time_after32(local_clock() >> 10, timeout_ts));
if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", drm_err(&gt->i915->drm,
bank, bit, ident); "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
bank, bit, ident);
return 0; return 0;
} }
...@@ -364,7 +365,8 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) ...@@ -364,7 +365,8 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT |
GT_CS_MASTER_ERROR_INTERRUPT)) GT_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); drm_dbg(&gt->i915->drm, "Command parser error, gt_iir 0x%08x\n",
gt_iir);
if (gt_iir & GT_PARITY_ERROR(gt->i915)) if (gt_iir & GT_PARITY_ERROR(gt->i915))
gen7_parity_error_irq_handler(gt, gt_iir); gen7_parity_error_irq_handler(gt, gt_iir);
......
...@@ -430,7 +430,8 @@ static int __gen5_rps_set(struct intel_rps *rps, u8 val) ...@@ -430,7 +430,8 @@ static int __gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) { if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n"); drm_dbg(&rps_to_i915(rps)->drm,
"gpu busy, RCS change rejected\n");
return -EBUSY; /* still busy with another command */ return -EBUSY; /* still busy with another command */
} }
...@@ -1953,7 +1954,8 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) ...@@ -1953,7 +1954,8 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10); intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); drm_dbg(&rps_to_i915(rps)->drm,
"Command parser error, pm_iir 0x%08x\n", pm_iir);
} }
void gen5_rps_irq_handler(struct intel_rps *rps) void gen5_rps_irq_handler(struct intel_rps *rps)
......
...@@ -55,8 +55,10 @@ ...@@ -55,8 +55,10 @@
* - Public functions to init or apply the given workaround type. * - Public functions to init or apply the given workaround type.
*/ */
static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name) static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
const char *name, const char *engine_name)
{ {
wal->gt = gt;
wal->name = name; wal->name = name;
wal->engine_name = engine_name; wal->engine_name = engine_name;
} }
...@@ -80,13 +82,14 @@ static void wa_init_finish(struct i915_wa_list *wal) ...@@ -80,13 +82,14 @@ static void wa_init_finish(struct i915_wa_list *wal)
if (!wal->count) if (!wal->count)
return; return;
DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n", drm_dbg(&wal->gt->i915->drm, "Initialized %u %s workarounds on %s\n",
wal->wa_count, wal->name, wal->engine_name); wal->wa_count, wal->name, wal->engine_name);
} }
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{ {
unsigned int addr = i915_mmio_reg_offset(wa->reg); unsigned int addr = i915_mmio_reg_offset(wa->reg);
struct drm_i915_private *i915 = wal->gt->i915;
unsigned int start = 0, end = wal->count; unsigned int start = 0, end = wal->count;
const unsigned int grow = WA_LIST_CHUNK; const unsigned int grow = WA_LIST_CHUNK;
struct i915_wa *wa_; struct i915_wa *wa_;
...@@ -99,7 +102,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) ...@@ -99,7 +102,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa), list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
GFP_KERNEL); GFP_KERNEL);
if (!list) { if (!list) {
DRM_ERROR("No space for workaround init!\n"); drm_err(&i915->drm, "No space for workaround init!\n");
return; return;
} }
...@@ -122,9 +125,10 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) ...@@ -122,9 +125,10 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
wa_ = &wal->list[mid]; wa_ = &wal->list[mid];
if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) { if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n", drm_err(&i915->drm,
i915_mmio_reg_offset(wa_->reg), "Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
wa_->clr, wa_->set); i915_mmio_reg_offset(wa_->reg),
wa_->clr, wa_->set);
wa_->set &= ~wa->clr; wa_->set &= ~wa->clr;
} }
...@@ -826,7 +830,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, ...@@ -826,7 +830,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
{ {
struct drm_i915_private *i915 = engine->i915; struct drm_i915_private *i915 = engine->i915;
wa_init_start(wal, name, engine->name); wa_init_start(wal, engine->gt, name, engine->name);
/* Applies to all engines */ /* Applies to all engines */
/* /*
...@@ -1676,7 +1680,7 @@ void intel_gt_init_workarounds(struct intel_gt *gt) ...@@ -1676,7 +1680,7 @@ void intel_gt_init_workarounds(struct intel_gt *gt)
{ {
struct i915_wa_list *wal = &gt->wa_list; struct i915_wa_list *wal = &gt->wa_list;
wa_init_start(wal, "GT", "global"); wa_init_start(wal, gt, "GT", "global");
gt_init_workarounds(gt, wal); gt_init_workarounds(gt, wal);
wa_init_finish(wal); wa_init_finish(wal);
} }
...@@ -1698,12 +1702,14 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal) ...@@ -1698,12 +1702,14 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
} }
static bool static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
const char *name, const char *from)
{ {
if ((cur ^ wa->set) & wa->read) { if ((cur ^ wa->set) & wa->read) {
DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n", drm_err(&gt->i915->drm,
name, from, i915_mmio_reg_offset(wa->reg), "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
cur, cur & wa->read, wa->set & wa->read); name, from, i915_mmio_reg_offset(wa->reg),
cur, cur & wa->read, wa->set & wa->read);
return false; return false;
} }
...@@ -1749,7 +1755,7 @@ wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal) ...@@ -1749,7 +1755,7 @@ wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) : intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
intel_uncore_read_fw(uncore, wa->reg); intel_uncore_read_fw(uncore, wa->reg);
wa_verify(wa, val, wal->name, "application"); wa_verify(wal->gt, wa, val, wal->name, "application");
} }
} }
...@@ -1779,7 +1785,7 @@ static bool wa_list_verify(struct intel_gt *gt, ...@@ -1779,7 +1785,7 @@ static bool wa_list_verify(struct intel_gt *gt,
intel_uncore_forcewake_get__locked(uncore, fw); intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
ok &= wa_verify(wa, wa->is_mcr ? ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) : intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
intel_uncore_read_fw(uncore, wa->reg), intel_uncore_read_fw(uncore, wa->reg),
wal->name, from); wal->name, from);
...@@ -2127,7 +2133,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) ...@@ -2127,7 +2133,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915; struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist; struct i915_wa_list *w = &engine->whitelist;
wa_init_start(w, "whitelist", engine->name); wa_init_start(w, engine->gt, "whitelist", engine->name);
if (IS_PONTEVECCHIO(i915)) if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine); pvc_whitelist_build(engine);
...@@ -3012,7 +3018,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine) ...@@ -3012,7 +3018,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) < 4) if (GRAPHICS_VER(engine->i915) < 4)
return; return;
wa_init_start(wal, "engine", engine->name); wa_init_start(wal, engine->gt, "engine", engine->name);
engine_init_workarounds(engine, wal); engine_init_workarounds(engine, wal);
wa_init_finish(wal); wa_init_finish(wal);
} }
...@@ -3193,7 +3199,7 @@ static int engine_wa_list_verify(struct intel_context *ce, ...@@ -3193,7 +3199,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg))) if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
continue; continue;
if (!wa_verify(wa, results[i], wal->name, from)) if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
err = -ENXIO; err = -ENXIO;
} }
......
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
#include "i915_reg_defs.h" #include "i915_reg_defs.h"
struct intel_gt;
struct i915_wa { struct i915_wa {
union { union {
i915_reg_t reg; i915_reg_t reg;
...@@ -24,6 +26,7 @@ struct i915_wa { ...@@ -24,6 +26,7 @@ struct i915_wa {
}; };
struct i915_wa_list { struct i915_wa_list {
struct intel_gt *gt;
const char *name; const char *name;
const char *engine_name; const char *engine_name;
struct i915_wa *list; struct i915_wa *list;
......
...@@ -66,14 +66,14 @@ reference_lists_init(struct intel_gt *gt, struct wa_lists *lists) ...@@ -66,14 +66,14 @@ reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists)); memset(lists, 0, sizeof(*lists));
wa_init_start(&lists->gt_wa_list, "GT_REF", "global"); wa_init_start(&lists->gt_wa_list, gt, "GT_REF", "global");
gt_init_workarounds(gt, &lists->gt_wa_list); gt_init_workarounds(gt, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list); wa_init_finish(&lists->gt_wa_list);
for_each_engine(engine, gt, id) { for_each_engine(engine, gt, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list; struct i915_wa_list *wal = &lists->engine[id].wa_list;
wa_init_start(wal, "REF", engine->name); wa_init_start(wal, gt, "REF", engine->name);
engine_init_workarounds(engine, wal); engine_init_workarounds(engine, wal);
wa_init_finish(wal); wa_init_finish(wal);
......
...@@ -688,8 +688,8 @@ i915_drop_caches_set(void *data, u64 val) ...@@ -688,8 +688,8 @@ i915_drop_caches_set(void *data, u64 val)
unsigned int flags; unsigned int flags;
int ret; int ret;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", drm_dbg(&i915->drm, "Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL); val, val & DROP_ALL);
ret = gt_drop_caches(to_gt(i915), val); ret = gt_drop_caches(to_gt(i915), val);
if (ret) if (ret)
......
...@@ -1286,7 +1286,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) ...@@ -1286,7 +1286,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
struct i915_drm_client *client; struct i915_drm_client *client;
int ret = -ENOMEM; int ret = -ENOMEM;
DRM_DEBUG("\n"); drm_dbg(&i915->drm, "\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv) if (!file_priv)
......
...@@ -179,7 +179,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, ...@@ -179,7 +179,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = i915_perf_oa_timestamp_frequency(i915); value = i915_perf_oa_timestamp_frequency(i915);
break; break;
default: default:
DRM_DEBUG("Unknown parameter %d\n", param->param); drm_dbg(&i915->drm, "Unknown parameter %d\n", param->param);
return -EINVAL; return -EINVAL;
} }
......
...@@ -1086,8 +1086,9 @@ static void ivb_parity_work(struct work_struct *work) ...@@ -1086,8 +1086,9 @@ static void ivb_parity_work(struct work_struct *work)
kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
KOBJ_CHANGE, parity_event); KOBJ_CHANGE, parity_event);
DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", drm_dbg(&dev_priv->drm,
slice, row, bank, subbank); "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
slice, row, bank, subbank);
kfree(parity_event[4]); kfree(parity_event[4]);
kfree(parity_event[3]); kfree(parity_event[3]);
...@@ -2774,7 +2775,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) ...@@ -2774,7 +2775,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl); raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
} else { } else {
DRM_ERROR("Tile not supported: 0x%08x\n", master_tile_ctl); drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
master_tile_ctl);
dg1_master_intr_enable(regs); dg1_master_intr_enable(regs);
return IRQ_NONE; return IRQ_NONE;
} }
...@@ -3940,7 +3942,7 @@ static void i8xx_error_irq_ack(struct drm_i915_private *i915, ...@@ -3940,7 +3942,7 @@ static void i8xx_error_irq_ack(struct drm_i915_private *i915,
static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
u16 eir, u16 eir_stuck) u16 eir, u16 eir_stuck)
{ {
DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
if (eir_stuck) if (eir_stuck)
drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
...@@ -3975,7 +3977,7 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, ...@@ -3975,7 +3977,7 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
u32 eir, u32 eir_stuck) u32 eir, u32 eir_stuck)
{ {
DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);
if (eir_stuck) if (eir_stuck)
drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n", drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
......
...@@ -530,9 +530,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) ...@@ -530,9 +530,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
if (OA_TAKEN(hw_tail, tail) > report_size && if (OA_TAKEN(hw_tail, tail) > report_size &&
__ratelimit(&stream->perf->tail_pointer_race)) __ratelimit(&stream->perf->tail_pointer_race))
DRM_NOTE("unlanded report(s) head=0x%x " drm_notice(&stream->uncore->i915->drm,
"tail=0x%x hw_tail=0x%x\n", "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
head, tail, hw_tail); head, tail, hw_tail);
stream->oa_buffer.tail = gtt_offset + tail; stream->oa_buffer.tail = gtt_offset + tail;
stream->oa_buffer.aging_tail = gtt_offset + hw_tail; stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
...@@ -1015,7 +1015,8 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, ...@@ -1015,7 +1015,8 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
*/ */
if (report32[0] == 0) { if (report32[0] == 0) {
if (__ratelimit(&stream->perf->spurious_report_rs)) if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n"); drm_notice(&uncore->i915->drm,
"Skipping spurious, invalid OA report\n");
continue; continue;
} }
...@@ -1602,8 +1603,9 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) ...@@ -1602,8 +1603,9 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_noa_wait(stream); free_noa_wait(stream);
if (perf->spurious_report_rs.missed) { if (perf->spurious_report_rs.missed) {
DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n", drm_notice(&gt->i915->drm,
perf->spurious_report_rs.missed); "%d spurious OA report notices suppressed due to ratelimiting\n",
perf->spurious_report_rs.missed);
} }
} }
......
...@@ -250,8 +250,9 @@ static int query_perf_config_data(struct drm_i915_private *i915, ...@@ -250,8 +250,9 @@ static int query_perf_config_data(struct drm_i915_private *i915,
return total_size; return total_size;
if (query_item->length < total_size) { if (query_item->length < total_size) {
DRM_DEBUG("Invalid query config data item size=%u expected=%u\n", drm_dbg(&i915->drm,
query_item->length, total_size); "Invalid query config data item size=%u expected=%u\n",
query_item->length, total_size);
return -EINVAL; return -EINVAL;
} }
...@@ -418,9 +419,10 @@ static int query_perf_config_list(struct drm_i915_private *i915, ...@@ -418,9 +419,10 @@ static int query_perf_config_list(struct drm_i915_private *i915,
} while (n_configs > alloc); } while (n_configs > alloc);
if (query_item->length < sizeof_perf_config_list(n_configs)) { if (query_item->length < sizeof_perf_config_list(n_configs)) {
DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n", drm_dbg(&i915->drm,
query_item->length, "Invalid query config list item size=%u expected=%zu\n",
sizeof_perf_config_list(n_configs)); query_item->length,
sizeof_perf_config_list(n_configs));
kfree(oa_config_ids); kfree(oa_config_ids);
return -EINVAL; return -EINVAL;
} }
......
...@@ -218,7 +218,8 @@ static const struct bin_attribute error_state_attr = { ...@@ -218,7 +218,8 @@ static const struct bin_attribute error_state_attr = {
static void i915_setup_error_capture(struct device *kdev) static void i915_setup_error_capture(struct device *kdev)
{ {
if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr)) if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
DRM_ERROR("error_state sysfs setup failed\n"); drm_err(&kdev_minor_to_i915(kdev)->drm,
"error_state sysfs setup failed\n");
} }
static void i915_teardown_error_capture(struct device *kdev) static void i915_teardown_error_capture(struct device *kdev)
......
...@@ -73,14 +73,16 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) ...@@ -73,14 +73,16 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
char buf[512]; char buf[512];
if (!vma->node.stack) { if (!vma->node.stack) {
DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n", drm_dbg(&to_i915(vma->obj->base.dev)->drm
vma->node.start, vma->node.size, reason); "vma.node [%08llx + %08llx] %s: unknown owner\n",
vma->node.start, vma->node.size, reason);
return; return;
} }
stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0); stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", drm_dbg(&to_i915(vma->obj->base.dev)->drm,
vma->node.start, vma->node.size, reason, buf); "vma.node [%08llx + %08llx] %s: inserted at %s\n",
vma->node.start, vma->node.size, reason, buf);
} }
#else #else
...@@ -782,9 +784,9 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, ...@@ -782,9 +784,9 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
* attempt to find space. * attempt to find space.
*/ */
if (size > end) { if (size > end) {
DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n", drm_dbg(&to_i915(vma->obj->base.dev)->drm,
size, flags & PIN_MAPPABLE ? "mappable" : "total", "Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
end); size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
return -ENOSPC; return -ENOSPC;
} }
......
...@@ -178,8 +178,9 @@ static inline void ...@@ -178,8 +178,9 @@ static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{ {
if (wait_ack_clear(d, FORCEWAKE_KERNEL)) { if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", drm_err(&d->uncore->i915->drm,
intel_uncore_forcewake_domain_to_str(d->id)); "%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */ add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
} }
} }
...@@ -226,11 +227,12 @@ fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d, ...@@ -226,11 +227,12 @@ fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
fw_clear(d, FORCEWAKE_KERNEL_FALLBACK); fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
} while (!ack_detected && pass++ < 10); } while (!ack_detected && pass++ < 10);
DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n", drm_dbg(&d->uncore->i915->drm,
intel_uncore_forcewake_domain_to_str(d->id), "%s had to use fallback to %s ack, 0x%x (passes %u)\n",
type == ACK_SET ? "set" : "clear", intel_uncore_forcewake_domain_to_str(d->id),
fw_ack(d), type == ACK_SET ? "set" : "clear",
pass); fw_ack(d),
pass);
return ack_detected ? 0 : -ETIMEDOUT; return ack_detected ? 0 : -ETIMEDOUT;
} }
...@@ -255,8 +257,9 @@ static inline void ...@@ -255,8 +257,9 @@ static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d) fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{ {
if (wait_ack_set(d, FORCEWAKE_KERNEL)) { if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", drm_err(&d->uncore->i915->drm,
intel_uncore_forcewake_domain_to_str(d->id)); "%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */ add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment