Commit 38c04b47 authored by Maarten Lankhorst, committed by Rodrigo Vivi

drm/xe: Use atomic instead of mutex for xe_device_mem_access_ongoing

xe_guc_ct_fast_path() is called from an irq context, and cannot lock
the mutex used by xe_device_mem_access_ongoing().

Fortunately it is easy to fix, and the atomic guarantees are good enough
to ensure xe->mem_access.hold_rpm is set before the last ref is dropped.

As far as I can tell, the runtime ref in device access could be removed
entirely, but I don't dare to do that yet.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 044f0cfb
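
The diff below boils down to replacing a mutex-protected s32 counter with an atomic one, so the fast path can test it without taking a sleeping lock. As an illustration only, here is a minimal userspace sketch of the same counting pattern, written with C11 <stdatomic.h> instead of the kernel's atomic_t API; the demo_* names and the printf driver are invented for this sketch and are not part of the xe driver.

/*
 * Minimal userspace sketch of the pattern applied below, using C11
 * <stdatomic.h> in place of the kernel's atomic_t API. All names here
 * (demo_dev, demo_mem_access_*) are illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_dev {
        atomic_int ref;    /* lock-free, so safe to read from IRQ-like contexts */
        bool hold_rpm;     /* only written by whoever owns the 0 <-> 1 transition */
};

static void demo_mem_access_get(struct demo_dev *d)
{
        /* atomic_fetch_add() returns the old value; +1 mirrors atomic_inc_return() */
        int ref = atomic_fetch_add(&d->ref, 1) + 1;

        if (ref == 1)
                d->hold_rpm = true;     /* stands in for xe_pm_runtime_get_if_active() */
}

static void demo_mem_access_put(struct demo_dev *d)
{
        bool hold = d->hold_rpm;
        int ref = atomic_fetch_sub(&d->ref, 1) - 1;

        if (ref == 0 && hold)
                d->hold_rpm = false;    /* stands in for xe_pm_runtime_put() */
}

/* The fast-path check only needs a plain atomic read, no sleeping lock. */
static bool demo_mem_access_ongoing(struct demo_dev *d)
{
        return atomic_load(&d->ref) != 0;
}

int main(void)
{
        struct demo_dev d = { .hold_rpm = false };

        atomic_init(&d.ref, 0);

        demo_mem_access_get(&d);
        printf("ongoing: %d\n", demo_mem_access_ongoing(&d));  /* 1 */
        demo_mem_access_put(&d);
        printf("ongoing: %d\n", demo_mem_access_ongoing(&d));  /* 0 */
        return 0;
}

The point is that the 0 -> 1 and 1 -> 0 transitions are still detected from the value returned by the atomic operation itself, so hold_rpm only needs to be written by the caller that observed the transition, which is the guarantee the commit message relies on.
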
@@ -206,8 +206,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	if (err)
 		goto err_put;
 
-	drmm_mutex_init(&xe->drm, &xe->mem_access.lock);
-
 	return xe;
 
 err_put:
@@ -354,25 +352,25 @@ u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
 void xe_device_mem_access_get(struct xe_device *xe)
 {
 	bool resumed = xe_pm_runtime_resume_if_suspended(xe);
+	int ref = atomic_inc_return(&xe->mem_access.ref);
 
-	mutex_lock(&xe->mem_access.lock);
-	if (xe->mem_access.ref++ == 0)
+	if (ref == 1)
 		xe->mem_access.hold_rpm = xe_pm_runtime_get_if_active(xe);
-	mutex_unlock(&xe->mem_access.lock);
 
 	/* The usage counter increased if device was immediately resumed */
 	if (resumed)
 		xe_pm_runtime_put(xe);
 
-	XE_WARN_ON(xe->mem_access.ref == S32_MAX);
+	XE_WARN_ON(ref == S32_MAX);
 }
 
 void xe_device_mem_access_put(struct xe_device *xe)
 {
-	mutex_lock(&xe->mem_access.lock);
-	if (--xe->mem_access.ref == 0 && xe->mem_access.hold_rpm)
+	bool hold = xe->mem_access.hold_rpm;
+	int ref = atomic_dec_return(&xe->mem_access.ref);
+
+	if (!ref && hold)
 		xe_pm_runtime_put(xe);
-	mutex_unlock(&xe->mem_access.lock);
 
-	XE_WARN_ON(xe->mem_access.ref < 0);
+	XE_WARN_ON(ref < 0);
 }
@@ -90,20 +90,14 @@ static inline struct xe_force_wake * gt_to_fw(struct xe_gt *gt)
 void xe_device_mem_access_get(struct xe_device *xe);
 void xe_device_mem_access_put(struct xe_device *xe);
 
-static inline void xe_device_assert_mem_access(struct xe_device *xe)
+static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
 {
-	XE_WARN_ON(!xe->mem_access.ref);
+	return atomic_read(&xe->mem_access.ref);
 }
 
-static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
+static inline void xe_device_assert_mem_access(struct xe_device *xe)
 {
-	bool ret;
-
-	mutex_lock(&xe->mem_access.lock);
-	ret = xe->mem_access.ref;
-	mutex_unlock(&xe->mem_access.lock);
-
-	return ret;
+	XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
 }
 
 static inline bool xe_device_in_fault_mode(struct xe_device *xe)
@@ -184,10 +184,8 @@ struct xe_device {
 	 * triggering additional actions when they occur.
 	 */
 	struct {
-		/** @lock: protect the ref count */
-		struct mutex lock;
 		/** @ref: ref count of memory accesses */
-		s32 ref;
+		atomic_t ref;
 		/** @hold_rpm: need to put rpm ref back at the end */
 		bool hold_rpm;
 	} mem_access;