Commit bfc10936 authored by Daniel Vetter

Merge tag 'drm-intel-gt-next-2024-07-04' of https://gitlab.freedesktop.org/drm/i915/kernel into drm-next

Driver Changes:

Fixes/improvements/new stuff:

- Downgrade stolen lmem setup warning [gem] (Jonathan Cavitt)
- Evaluate GuC priority within locks [gt/uc] (Andi Shyti)
- Fix potential UAF by revoke of fence registers [gt] (Janusz Krzysztofik)
- Return NULL instead of '0' [gem] (Andi Shyti)
- Use the correct format specifier for resource_size_t [gem] (Andi Shyti)
- Suppress oom warning in favour of ENOMEM to userspace [gem] (Nirmoy Das)

Miscellaneous:

- Evaluate forcewake usage within locks [gt] (Andi Shyti)
- Fix typo in comment [gt/uc] (Andi Shyti)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Tvrtko Ursulin <tursulin@igalia.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZoZP6mUSergfzFMh@linux
parents 6be146cf 3b85152c
@@ -936,8 +936,12 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
 	} else {
 		/* Use DSM base address instead for stolen memory */
 		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
-		if (WARN_ON(lmem_size < dsm_base))
-			return ERR_PTR(-ENODEV);
+		if (lmem_size < dsm_base) {
+			drm_dbg(&i915->drm,
+				"Disabling stolen memory support due to OOB placement: lmem_size = %pa vs dsm_base = %pa\n",
+				&lmem_size, &dsm_base);
+			return NULL;
+		}
 		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
 	}
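The hunk above turns a WARN_ON() plus ERR_PTR(-ENODEV) into a quiet drm_dbg() and a NULL return, so a stolen region that simply cannot be placed is treated as "not present" rather than as a driver error. Below is a minimal userspace sketch of the NULL-vs-ERR_PTR return convention this relies on; ERR_PTR()/IS_ERR()/PTR_ERR() are re-implemented locally so it compiles outside the kernel tree, and setup_region() and struct region are invented stand-ins, not the real i915 code.

/* Illustrative sketch of the kernel ERR_PTR/IS_ERR convention, built in
 * userspace.  setup_region() and struct region are hypothetical. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct region { size_t size; };

/* NULL        -> region not present, caller carries on without it
 * ERR_PTR(-E) -> real failure, caller must propagate the error
 * pointer     -> usable region */
static struct region *setup_region(size_t lmem_size, size_t dsm_base)
{
	struct region *r;

	if (lmem_size < dsm_base) {
		fprintf(stderr, "debug: OOB placement, disabling region\n");
		return NULL;			/* absent, but not an error */
	}

	r = malloc(sizeof(*r));
	if (!r)
		return ERR_PTR(-ENOMEM);	/* hard failure */

	r->size = lmem_size - dsm_base;
	return r;
}

int main(void)
{
	struct region *r = setup_region(8, 16);	/* triggers the NULL path */

	if (IS_ERR(r))
		return (int)-PTR_ERR(r);	/* hard error: propagate errno */

	if (!r) {
		printf("no stolen region, continuing without it\n");
		return 0;
	}

	printf("region of %zu bytes\n", r->size);
	free(r);
	return 0;
}

The design point is the same as in the patch: only conditions the caller cannot recover from deserve an error pointer (and a warning); an expected "feature unavailable" case returns NULL and is logged at debug level at most.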
@@ -71,6 +71,8 @@ static int fw_domains_show(struct seq_file *m, void *data)
 	struct intel_uncore_forcewake_domain *fw_domain;
 	unsigned int tmp;
 
+	spin_lock_irq(&uncore->lock);
+
 	seq_printf(m, "user.bypass_count = %u\n",
 		   uncore->user_forcewake_count);
 
@@ -79,6 +81,8 @@ static int fw_domains_show(struct seq_file *m, void *data)
 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
 			   READ_ONCE(fw_domain->wake_count));
 
+	spin_unlock_irq(&uncore->lock);
+
 	return 0;
 }
 
 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);
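The two hunks above hold uncore->lock across the whole debugfs dump, so user_forcewake_count and the per-domain wake counts are printed as one consistent snapshot instead of values sampled at different moments. The sketch below shows the same pattern with a pthread mutex in plain userspace C; the counter layout and the take_reference()/dump_counters() helpers are made up for the example.

/* Sketch: snapshot shared counters under one lock so a debug dump is
 * internally consistent.  All names here are illustrative. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct {
	unsigned int user_count;
	unsigned int domain_count[2];
} counters;

/* Writer side: every update happens with the lock held. */
static void take_reference(int domain)
{
	pthread_mutex_lock(&lock);
	counters.user_count++;
	counters.domain_count[domain]++;
	pthread_mutex_unlock(&lock);
}

/* Reader side: hold the same lock across the whole dump, mirroring the
 * spin_lock_irq()/spin_unlock_irq() pair added around fw_domains_show(). */
static void dump_counters(void)
{
	pthread_mutex_lock(&lock);
	printf("user.count = %u\n", counters.user_count);
	for (int i = 0; i < 2; i++)
		printf("domain[%d].count = %u\n", i, counters.domain_count[i]);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	take_reference(0);
	take_reference(1);
	dump_counters();
	return 0;
}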
@@ -295,7 +295,7 @@ struct guc_update_scheduling_policy_header {
 } __packed;
 
 /*
- * Can't dynmically allocate memory for the scheduling policy KLV because
+ * Can't dynamically allocate memory for the scheduling policy KLV because
  * it will be sent from within the reset path. Need a fixed size lump on
  * the stack instead :(.
  *
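Beyond the typo fix, the comment documents a real constraint: the scheduling-policy KLV is sent from the reset path, so it cannot be allocated dynamically and instead lives in a fixed-size buffer on the stack. The sketch below illustrates that trade-off in plain C; MAX_KLV_WORDS, struct klv_buffer and build_policy_message() are invented for the example and sized arbitrarily.

/* Sketch: use a worst-case fixed-size stack buffer instead of heap
 * allocation in a path that must not fail or sleep.  Names are invented. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_KLV_WORDS 16	/* worst-case payload, chosen at build time */

struct klv_buffer {
	uint32_t len;			/* number of valid words */
	uint32_t words[MAX_KLV_WORDS];
};

/* Build the message into caller-provided stack storage; no allocator calls,
 * so this is safe to run from an error-recovery/reset-style path. */
static int build_policy_message(struct klv_buffer *buf,
				const uint32_t *payload, uint32_t n)
{
	if (n > MAX_KLV_WORDS)
		return -1;	/* reject oversize payloads instead of growing dynamically */

	memcpy(buf->words, payload, n * sizeof(*payload));
	buf->len = n;
	return 0;
}

int main(void)
{
	struct klv_buffer buf;			/* lives on the stack */
	uint32_t payload[] = { 0x1, 0x2, 0x3 };

	if (build_policy_message(&buf, payload, 3) == 0)
		printf("built %u-word message on the stack\n", buf.len);
	return 0;
}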
@@ -4267,20 +4267,25 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
 	u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
 
 	/* Short circuit function */
-	if (prio < I915_PRIORITY_NORMAL ||
-	    rq->guc_prio == GUC_PRIO_FINI ||
-	    (rq->guc_prio != GUC_PRIO_INIT &&
-	     !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
+	if (prio < I915_PRIORITY_NORMAL)
 		return;
 
 	spin_lock(&ce->guc_state.lock);
-	if (rq->guc_prio != GUC_PRIO_FINI) {
-		if (rq->guc_prio != GUC_PRIO_INIT)
-			sub_context_inflight_prio(ce, rq->guc_prio);
-		rq->guc_prio = new_guc_prio;
-		add_context_inflight_prio(ce, rq->guc_prio);
-		update_context_prio(ce);
-	}
+
+	if (rq->guc_prio == GUC_PRIO_FINI)
+		goto exit;
+
+	if (!new_guc_prio_higher(rq->guc_prio, new_guc_prio))
+		goto exit;
+
+	if (rq->guc_prio != GUC_PRIO_INIT)
+		sub_context_inflight_prio(ce, rq->guc_prio);
+
+	rq->guc_prio = new_guc_prio;
+	add_context_inflight_prio(ce, rq->guc_prio);
+	update_context_prio(ce);
+
+exit:
 	spin_unlock(&ce->guc_state.lock);
 }
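The rework above moves every check that depends on rq->guc_prio inside ce->guc_state.lock, leaving only the lock-independent prio < I915_PRIORITY_NORMAL test outside, so the priority cannot change between the test and the update. The userspace sketch below mirrors that check-then-act-under-one-lock shape with a pthread mutex; struct request, the PRIO_* values and prio_higher() are invented stand-ins, not the GuC code.

/* Sketch: evaluate and update shared state inside one critical section,
 * with goto-style early exits before the unlock.  Names are illustrative. */
#include <pthread.h>
#include <stdio.h>

enum { PRIO_FINI = 0xff, PRIO_INIT = 0xfe };

struct request {
	pthread_mutex_t lock;	/* stands in for ce->guc_state.lock */
	unsigned char prio;
};

static int prio_higher(unsigned char old, unsigned char new) { return new > old; }

static void bump_request_prio(struct request *rq, unsigned char new_prio)
{
	pthread_mutex_lock(&rq->lock);

	/* All state-dependent checks happen with the lock held, so rq->prio
	 * cannot change between the test and the update. */
	if (rq->prio == PRIO_FINI)
		goto exit;

	if (rq->prio != PRIO_INIT && !prio_higher(rq->prio, new_prio))
		goto exit;

	rq->prio = new_prio;

exit:
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct request rq = { .prio = PRIO_INIT };

	pthread_mutex_init(&rq.lock, NULL);
	bump_request_prio(&rq, 3);
	printf("prio = %u\n", rq.prio);
	pthread_mutex_destroy(&rq.lock);
	return 0;
}

Checking the priority outside the lock saved a lock acquisition but read state that another CPU could be changing; keeping only the purely local prio comparison outside preserves the fast path while making the rest race-free.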
@@ -90,7 +90,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 	GEM_BUG_ON(!max_segment);
 
-	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL | __GFP_NOWARN);
 	if (!rsgt)
 		return ERR_PTR(-ENOMEM);
@@ -104,7 +104,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 	}
 
 	if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
-			   GFP_KERNEL)) {
+			   GFP_KERNEL | __GFP_NOWARN)) {
 		i915_refct_sgt_put(rsgt);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -178,7 +178,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 	GEM_BUG_ON(list_empty(blocks));
 	GEM_BUG_ON(!max_segment);
 
-	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL | __GFP_NOWARN);
 	if (!rsgt)
 		return ERR_PTR(-ENOMEM);
@@ -190,7 +190,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 		return ERR_PTR(-E2BIG);
 	}
 
-	if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
+	if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL | __GFP_NOWARN)) {
 		i915_refct_sgt_put(rsgt);
 		return ERR_PTR(-ENOMEM);
 	}
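These hunks add __GFP_NOWARN to allocations whose failure is already reported to userspace as -ENOMEM, so the kernel does not additionally dump an allocation-failure warning. The sketch below shows the same "stay quiet, propagate the error" pattern in userspace C; ALLOC_QUIET, alloc_table() and build_sg_table() are invented analogues of __GFP_NOWARN, kmalloc() and sg_alloc_table(), not the real APIs.

/* Sketch: suppress the failure log when the caller already propagates the
 * error, keeping dmesg-style output free of redundant noise. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_QUIET (1u << 0)	/* analogue of __GFP_NOWARN */

static void *alloc_table(size_t size, unsigned int flags)
{
	void *p = malloc(size);

	if (!p && !(flags & ALLOC_QUIET))
		fprintf(stderr, "alloc_table: allocation of %zu bytes failed\n", size);

	return p;
}

/* The caller already turns failure into -ENOMEM for its user, so the extra
 * log line carries no new information and is suppressed with ALLOC_QUIET. */
static int build_sg_table(size_t size)
{
	void *table = alloc_table(size, ALLOC_QUIET);

	if (!table)
		return -ENOMEM;

	free(table);
	return 0;
}

int main(void)
{
	int ret = build_sg_table(4096);

	printf("build_sg_table() returned %d\n", ret);
	return ret ? 1 : 0;
}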