Commit f8e170a3 authored by Dave Airlie

Merge tag 'drm-xe-fixes-2024-08-15' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

- Validate user fence during creation (Brost)
- Fix use after free when client stats are captured (Umesh)
- SRIOV fixes (Michal)
- Runtime PM fixes (Brost)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Zr4KWF5nM1YvnT8H@intel.com
parents 75eac7e8 f0027022
@@ -87,9 +87,55 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 	spin_unlock(&xe->clients.lock);

 	file->driver_priv = xef;
+	kref_init(&xef->refcount);
+
 	return 0;
 }

+static void xe_file_destroy(struct kref *ref)
+{
+	struct xe_file *xef = container_of(ref, struct xe_file, refcount);
+	struct xe_device *xe = xef->xe;
+
+	xa_destroy(&xef->exec_queue.xa);
+	mutex_destroy(&xef->exec_queue.lock);
+	xa_destroy(&xef->vm.xa);
+	mutex_destroy(&xef->vm.lock);
+
+	spin_lock(&xe->clients.lock);
+	xe->clients.count--;
+	spin_unlock(&xe->clients.lock);
+
+	xe_drm_client_put(xef->client);
+	kfree(xef);
+}
+
+/**
+ * xe_file_get() - Take a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Anyone with a pointer to xef must take a reference to the xe file
+ * object using this call.
+ *
+ * Return: xe file pointer
+ */
+struct xe_file *xe_file_get(struct xe_file *xef)
+{
+	kref_get(&xef->refcount);
+
+	return xef;
+}
+
+/**
+ * xe_file_put() - Drop a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Used to drop reference to the xef object
+ */
+void xe_file_put(struct xe_file *xef)
+{
+	kref_put(&xef->refcount, xe_file_destroy);
+}
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
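Note on the refcounting introduced above: xe_file_get()/xe_file_put() wrap the standard kref pattern, so whichever holder drops the last reference triggers xe_file_destroy() and frees the object; xe_file_close() no longer tears xef down directly. A minimal userspace analog of that pattern, with hypothetical names (illustration only, not xe driver code):

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct file_obj {
	atomic_int refcount;
};

static struct file_obj *file_get(struct file_obj *f)
{
	atomic_fetch_add(&f->refcount, 1);
	return f;
}

static void file_put(struct file_obj *f)
{
	/* the last put frees the object, like kref_put() + xe_file_destroy() */
	if (atomic_fetch_sub(&f->refcount, 1) == 1) {
		puts("last reference dropped, destroying");
		free(f);
	}
}

int main(void)
{
	struct file_obj *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	atomic_init(&f->refcount, 1);	/* kref_init(): one ref for the fd */
	file_get(f);			/* e.g. an exec queue pins the file */
	file_put(f);			/* the queue drops its ref */
	file_put(f);			/* fd close drops the last ref */
	return 0;
}
```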
@@ -98,6 +144,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 	struct xe_exec_queue *q;
 	unsigned long idx;

+	xe_pm_runtime_get(xe);
+
/*
* No need for exec_queue.lock here as there is no contention for it
* when FD is closing as IOCTLs presumably can't be modifying the
@@ -108,21 +156,14 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 		xe_exec_queue_kill(q);
 		xe_exec_queue_put(q);
 	}
-	xa_destroy(&xef->exec_queue.xa);
-	mutex_destroy(&xef->exec_queue.lock);
 	mutex_lock(&xef->vm.lock);
 	xa_for_each(&xef->vm.xa, idx, vm)
 		xe_vm_close_and_put(vm);
 	mutex_unlock(&xef->vm.lock);
-	xa_destroy(&xef->vm.xa);
-	mutex_destroy(&xef->vm.lock);

-	spin_lock(&xe->clients.lock);
-	xe->clients.count--;
-	spin_unlock(&xe->clients.lock);
+	xe_file_put(xef);

-	xe_drm_client_put(xef->client);
-	kfree(xef);
+	xe_pm_runtime_put(xe);
}
static const struct drm_ioctl_desc xe_ioctls[] = {
......
@@ -170,4 +170,7 @@ static inline bool xe_device_wedged(struct xe_device *xe)
 void xe_device_declare_wedged(struct xe_device *xe);

+struct xe_file *xe_file_get(struct xe_file *xef);
+void xe_file_put(struct xe_file *xef);
+
#endif
@@ -566,6 +566,9 @@ struct xe_file {
 	/** @client: drm client */
 	struct xe_drm_client *client;
+
+	/** @refcount: ref count of this xe file */
+	struct kref refcount;
};
#endif
@@ -251,11 +251,8 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
 	/* Accumulate all the exec queues from this client */
 	mutex_lock(&xef->exec_queue.lock);
-	xa_for_each(&xef->exec_queue.xa, i, q) {
+	xa_for_each(&xef->exec_queue.xa, i, q)
 		xe_exec_queue_update_run_ticks(q);
-		xef->run_ticks[q->class] += q->run_ticks - q->old_run_ticks;
-		q->old_run_ticks = q->run_ticks;
-	}
mutex_unlock(&xef->exec_queue.lock);
/* Get the total GPU cycles */
......
@@ -37,6 +37,10 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q)
 {
 	if (q->vm)
 		xe_vm_put(q->vm);
+
+	if (q->xef)
+		xe_file_put(q->xef);
+
kfree(q);
}
@@ -649,6 +653,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		goto kill_exec_queue;

 	args->exec_queue_id = id;
+	q->xef = xe_file_get(xef);
return 0;
@@ -762,6 +767,7 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
  */
 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 {
+	struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
@@ -773,6 +779,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 	if (!q->vm || !q->vm->xef)
 		return;

+	xef = q->vm->xef;
+
/*
* Only sample the first LRC. For parallel submission, all of them are
* scheduled together and we compensate that below by multiplying by
@@ -783,7 +791,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 	 */
 	lrc = q->lrc[0];
 	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
-	q->run_ticks += (new_ts - old_ts) * q->width;
+	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}
void xe_exec_queue_kill(struct xe_exec_queue *q)
......
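The change above moves tick accumulation from per-queue fields into the per-client xef->run_ticks[] array, keyed by engine class, so the stats survive the exec queue. The u32 delta (new_ts - old_ts) stays correct even when the hardware counter wraps, because unsigned subtraction is modular; a quick standalone check:

```c
/* Standalone check: unsigned 32-bit deltas survive counter wraparound. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old_ts = 0xfffffff0u;	/* just before the wrap */
	uint32_t new_ts = 0x00000010u;	/* counter wrapped past zero */
	uint32_t width = 2;		/* e.g. parallel submission width */

	/* modular arithmetic: 0x10 - 0xfffffff0 == 0x20 (32), not negative */
	printf("run ticks += %u\n", (new_ts - old_ts) * width);
	return 0;
}
```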
@@ -38,6 +38,9 @@ enum xe_exec_queue_priority {
  * a kernel object.
  */
 struct xe_exec_queue {
+	/** @xef: Back pointer to xe file if this is user created exec queue */
+	struct xe_file *xef;
+
/** @gt: graphics tile this exec queue can submit to */
struct xe_gt *gt;
/**
@@ -139,10 +142,6 @@ struct xe_exec_queue {
 	 * Protected by @vm's resv. Unused if @vm == NULL.
 	 */
 	u64 tlb_flush_seqno;
-	/** @old_run_ticks: prior hw engine class run time in ticks for this exec queue */
-	u64 old_run_ticks;
-	/** @run_ticks: hw engine class run time in ticks for this exec queue */
-	u64 run_ticks;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc *lrc[];
};
......
@@ -1927,6 +1927,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
 {
 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
 	struct xe_device *xe = gt_to_xe(gt);
+	bool is_primary = !xe_gt_is_media_type(gt);
bool valid_ggtt, valid_ctxs, valid_dbs;
bool valid_any, valid_all;
@@ -1935,13 +1936,17 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);

 	/* note that GuC doorbells are optional */
-	valid_any = valid_ggtt || valid_ctxs || valid_dbs;
-	valid_all = valid_ggtt && valid_ctxs;
+	valid_any = valid_ctxs || valid_dbs;
+	valid_all = valid_ctxs;
+
+	/* and GGTT/LMEM is configured on primary GT only */
+	valid_all = valid_all && valid_ggtt;
+	valid_any = valid_any || (valid_ggtt && is_primary);

 	if (IS_DGFX(xe)) {
 		bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);

-		valid_any = valid_any || valid_lmem;
+		valid_any = valid_any || (valid_lmem && is_primary);
 		valid_all = valid_all && valid_lmem;
 	}
......
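The SRIOV fix above stops counting GGTT (and LMEM on discrete parts) against media GTs, since those resources are provisioned on the primary GT only. A standalone sketch of the resulting predicate (illustrative only; the real code queries the GuC/GGTT configuration rather than taking booleans):

```c
#include <stdbool.h>
#include <stdio.h>

static void validate(bool is_primary, bool ggtt, bool ctxs, bool dbs,
		     bool is_dgfx, bool lmem)
{
	bool valid_any = ctxs || dbs;	/* GuC doorbells are optional */
	bool valid_all = ctxs;

	/* GGTT/LMEM are configured on the primary GT only */
	valid_all = valid_all && ggtt;
	valid_any = valid_any || (ggtt && is_primary);

	if (is_dgfx) {
		valid_any = valid_any || (lmem && is_primary);
		valid_all = valid_all && lmem;
	}

	printf("%s GT: any=%d all=%d\n",
	       is_primary ? "primary" : "media", valid_any, valid_all);
}

int main(void)
{
	/* a media GT with contexts (GGTT held by the primary GT) now validates */
	validate(false, true, true, false, false, false);
	return 0;
}
```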
@@ -850,7 +850,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

-	return bsearch(&key, runtime->regs, runtime->regs_size, sizeof(key),
+	return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
vf_runtime_reg_cmp);
}
......
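The one-liner above fixes a classic bsearch() pitfall: the third argument must be the number of valid, sorted entries (num_regs here), not the table's allocated capacity (regs_size). Searching the unpopulated tail breaks the sorted-array precondition and can miss real entries. A standalone demonstration with a hypothetical register table:

```c
#include <stdio.h>
#include <stdlib.h>

struct runtime_reg {
	unsigned int offset;
	unsigned int value;
};

static int reg_cmp(const void *a, const void *b)
{
	const struct runtime_reg *ra = a, *rb = b;

	return (ra->offset > rb->offset) - (ra->offset < rb->offset);
}

int main(void)
{
	/* sorted by offset; only the first 3 of 8 slots are populated */
	struct runtime_reg regs[8] = {
		{ 0x1000, 0xaa }, { 0x2000, 0xbb }, { 0x3000, 0xcc },
	};
	size_t num_regs = 3;	/* valid entries, not the capacity of 8 */
	struct runtime_reg key = { .offset = 0x2000 };
	struct runtime_reg *found;

	/* passing 8 would include zeroed slots and break the sort order */
	found = bsearch(&key, regs, num_regs, sizeof(key), reg_cmp);
	if (found)
		printf("reg 0x%x = 0x%x\n", found->offset, found->value);
	return 0;
}
```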
@@ -23,7 +23,17 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
 int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
 				 struct xe_gt_tlb_invalidation_fence *fence,
 				 u64 start, u64 end, u32 asid);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
 int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);

+void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+				       struct xe_gt_tlb_invalidation_fence *fence,
+				       bool stack);
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
+
+static inline void
+xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
+{
+	dma_fence_wait(&fence->base, false);
+}
+
#endif /* _XE_GT_TLB_INVALIDATION_ */
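These helpers replace the old seqno + xe_gt_tlb_invalidation_wait() flow with an embeddable dma_fence: the caller inits the fence (on the stack or inside a heap object), issues the invalidation, and then waits, or calls fini on a submission error since an unsubmitted fence would never signal. A kernel-context usage sketch under those assumptions (hypothetical function; not standalone, it mirrors the xe_vm.c hunk further below and uses only the declarations above):

```c
/* Sketch only: assumes kernel context and the declarations above. */
static int invalidate_one_tile(struct xe_gt *gt, struct xe_vma *vma)
{
	struct xe_gt_tlb_invalidation_fence fence;
	int ret;

	/* stack=true: the fence lives on this stack frame */
	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);

	ret = xe_gt_tlb_invalidation_vma(gt, &fence, vma);
	if (ret < 0) {
		/* never submitted, so it will never signal: tear it down */
		xe_gt_tlb_invalidation_fence_fini(&fence);
		return ret;
	}

	/* wraps dma_fence_wait() on &fence.base */
	xe_gt_tlb_invalidation_fence_wait(&fence);
	return 0;
}
```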
@@ -8,6 +8,8 @@
 #include <linux/dma-fence.h>

+struct xe_gt;
+
/**
* struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
*
@@ -17,6 +19,8 @@
 struct xe_gt_tlb_invalidation_fence {
 	/** @base: dma fence base */
 	struct dma_fence base;
+	/** @gt: GT which fence belong to */
+	struct xe_gt *gt;
/** @link: link into list of pending tlb fences */
struct list_head link;
/** @seqno: seqno of TLB invalidation to signal fence one */
......
@@ -327,6 +327,8 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
 	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
 		     state == XE_GUC_CT_STATE_STOPPED);

+	if (ct->g2h_outstanding)
+		xe_pm_runtime_put(ct_to_xe(ct));
ct->g2h_outstanding = 0;
ct->state = state;
@@ -495,10 +497,15 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
 {
 	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
+	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
+		     (g2h_len && num_g2h));

 	if (g2h_len) {
 		lockdep_assert_held(&ct->fast_lock);

+		if (!ct->g2h_outstanding)
+			xe_pm_runtime_get_noresume(ct_to_xe(ct));
+
ct->ctbs.g2h.info.space -= g2h_len;
ct->g2h_outstanding += num_g2h;
}
@@ -511,7 +518,8 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
 		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);

 	ct->ctbs.g2h.info.space += g2h_len;
-	--ct->g2h_outstanding;
+	if (!--ct->g2h_outstanding)
+		xe_pm_runtime_put(ct_to_xe(ct));
}
static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
......
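The CT changes above make the runtime PM reference edge-triggered: a single wakeref covers the whole window during which any G2H credits are outstanding, taken on the 0→1 transition in __g2h_reserve_space() and dropped on the 1→0 transition in __g2h_release_space() (or when the state machine force-clears the count). The guc_submit hunks below apply the same idea to queued scheduler messages. A standalone analog of the counting, with stand-in pm_get()/pm_put() (illustration only):

```c
#include <stdio.h>

static int wakeref;

static void pm_get(void) { wakeref++; printf("PM ref taken\n"); }
static void pm_put(void) { wakeref--; printf("PM ref dropped\n"); }

static int outstanding;

static void reserve(int num)
{
	if (!outstanding)	/* 0 -> 1 edge */
		pm_get();
	outstanding += num;
}

static void release(void)
{
	if (!--outstanding)	/* 1 -> 0 edge */
		pm_put();
}

int main(void)
{
	reserve(2);	/* takes the single PM ref */
	reserve(1);	/* no additional ref needed */
	release();
	release();
	release();	/* last credit retired: drops the PM ref */
	return 0;
}
```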
@@ -1393,6 +1393,8 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
 	default:
 		XE_WARN_ON("Unknown message type");
 	}
+
+	xe_pm_runtime_put(guc_to_xe(exec_queue_to_guc(msg->private_data)));
}
static const struct drm_sched_backend_ops drm_sched_ops = {
@@ -1482,6 +1484,8 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
 				   u32 opcode)
 {
+	xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
+
INIT_LIST_HEAD(&msg->link);
msg->opcode = opcode;
msg->private_data = q;
......
@@ -1115,23 +1115,6 @@ struct invalidation_fence {
 	u32 asid;
 };

-static const char *
-invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
-{
-	return "xe";
-}
-
-static const char *
-invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
-{
-	return "invalidation_fence";
-}
-
-static const struct dma_fence_ops invalidation_fence_ops = {
-	.get_driver_name = invalidation_fence_get_driver_name,
-	.get_timeline_name = invalidation_fence_get_timeline_name,
-};
-
static void invalidation_fence_cb(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
@@ -1170,15 +1153,8 @@ static int invalidation_fence_init(struct xe_gt *gt,
 	trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);

-	spin_lock_irq(&gt->tlb_invalidation.lock);
-	dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
-		       &gt->tlb_invalidation.lock,
-		       dma_fence_context_alloc(1), 1);
-	spin_unlock_irq(&gt->tlb_invalidation.lock);
-
-	INIT_LIST_HEAD(&ifence->base.link);
+	xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
dma_fence_get(&ifence->base.base); /* Ref for caller */
ifence->fence = fence;
ifence->gt = gt;
ifence->start = start;
......
@@ -53,14 +53,18 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 					       u64 value)
 {
 	struct xe_user_fence *ufence;
+	u64 __user *ptr = u64_to_user_ptr(addr);
+
+	if (!access_ok(ptr, sizeof(*ptr)))
+		return ERR_PTR(-EFAULT);

 	ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
 	if (!ufence)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

 	ufence->xe = xe;
 	kref_init(&ufence->refcount);
-	ufence->addr = u64_to_user_ptr(addr);
+	ufence->addr = ptr;
ufence->value = value;
ufence->mm = current->mm;
mmgrab(ufence->mm);
@@ -183,8 +187,8 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
 		} else {
 			sync->ufence = user_fence_create(xe, sync_in.addr,
 							 sync_in.timeline_value);
-			if (XE_IOCTL_DBG(xe, !sync->ufence))
-				return -ENOMEM;
+			if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
+				return PTR_ERR(sync->ufence);
}
break;
......
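With the validation fix above, user_fence_create() reports distinct failures (ERR_PTR(-EFAULT) for an inaccessible address, ERR_PTR(-ENOMEM) for allocation failure) instead of a bare NULL, and the caller forwards them with IS_ERR()/PTR_ERR(). A userspace re-implementation of the kernel's ERR_PTR trick, showing why one pointer can carry either a valid address or an errno (small negative errnos land in the last page of the address space, which no valid pointer occupies):

```c
#include <stdio.h>

#define MAX_ERRNO	4095
#define EFAULT		14

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fence_create(int fail)	/* hypothetical stand-in */
{
	static int dummy;

	if (fail)
		return ERR_PTR(-EFAULT);	/* bad user address */
	return &dummy;
}

int main(void)
{
	void *f = fence_create(1);

	if (IS_ERR(f))
		printf("create failed: %ld\n", PTR_ERR(f));
	return 0;
}
```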
@@ -1601,6 +1601,10 @@ static void vm_destroy_work_func(struct work_struct *w)
 		XE_WARN_ON(vm->pt_root[id]);

 	trace_xe_vm_free(vm);
+
+	if (vm->xef)
+		xe_file_put(vm->xef);
+
kfree(vm);
}
@@ -1916,7 +1920,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	}

 	args->vm_id = id;
-	vm->xef = xef;
+	vm->xef = xe_file_get(xef);
/* Record BO memory for VM pagetable created against client */
for_each_tile(tile, xe, id)
@@ -3337,10 +3341,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 {
 	struct xe_device *xe = xe_vma_vm(vma)->xe;
 	struct xe_tile *tile;
+	struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
 	u32 tile_needs_invalidate = 0;
-	int seqno[XE_MAX_TILES_PER_DEVICE];
 	u8 id;
-	int ret;
+	int ret = 0;
xe_assert(xe, !xe_vma_is_null(vma));
trace_xe_vma_invalidate(vma);
@@ -3365,29 +3369,33 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 	for_each_tile(tile, xe, id) {
 		if (xe_pt_zap_ptes(tile, vma)) {
-			tile_needs_invalidate |= BIT(id);
 			xe_device_wmb(xe);
+			xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+							  &fence[id], true);
+
 			/*
 			 * FIXME: We potentially need to invalidate multiple
 			 * GTs within the tile
 			 */
-			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
-			if (seqno[id] < 0)
-				return seqno[id];
-		}
-	}
+			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
+							 &fence[id], vma);
+			if (ret < 0) {
+				xe_gt_tlb_invalidation_fence_fini(&fence[id]);
+				goto wait;
+			}

-	for_each_tile(tile, xe, id) {
-		if (tile_needs_invalidate & BIT(id)) {
-			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
-			if (ret < 0)
-				return ret;
+			tile_needs_invalidate |= BIT(id);
 		}
 	}

+wait:
+	for_each_tile(tile, xe, id)
+		if (tile_needs_invalidate & BIT(id))
+			xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
 	vma->tile_invalidated = vma->tile_mask;

-	return 0;
+	return ret;
}
struct xe_vm_snapshot {
......
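The xe_vm_invalidate_vma() rework above keeps the invalidation fences on the stack, so the error path must not return while fences issued for other tiles are still in flight: a fence signaling later would write into a dead stack frame. Hence the goto wait, which ensures every successfully issued fence is waited on before the first error is returned. The shape of that pattern, standalone and illustrative (fake issue/wait steps):

```c
#include <stdio.h>

#define NUM_TILES 4

int main(void)
{
	int issued[NUM_TILES] = { 0 };
	int ret = 0;

	for (int i = 0; i < NUM_TILES; i++) {
		if (i == 2) {		/* simulate a failed issue */
			ret = -1;
			goto wait;	/* don't return: others are in flight */
		}
		issued[i] = 1;
		printf("issued fence %d\n", i);
	}

wait:
	/* wait on everything actually issued, even on the error path */
	for (int i = 0; i < NUM_TILES; i++)
		if (issued[i])
			printf("waited on fence %d\n", i);

	return ret;
}
```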