Commit 111a3f0a authored by Dave Airlie

Merge tag 'drm-xe-fixes-2024-02-01' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

UAPI Changes:
- Only allow a single user-fence per exec / bind (see the sketch below).
  The reason for this clarification fix is a limitation in the implementation,
  which can be lifted moving forward, if needed.
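  A minimal sketch of the kernel-side check, mirroring the xe_exec_ioctl /
  xe_vm_bind_ioctl hunks later in this diff (the helper name is hypothetical,
  not part of the patch):

    /* Reject more than one user fence in a single submission's sync array. */
    static int check_single_ufence(struct xe_device *xe,
                                   struct xe_sync_entry *syncs, u32 num_syncs)
    {
            u32 i, num_ufence = 0;

            for (i = 0; i < num_syncs; i++)
                    if (xe_sync_is_ufence(&syncs[i]))
                            num_ufence++;

            return XE_IOCTL_DBG(xe, num_ufence > 1) ? -EINVAL : 0;
    }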

Driver Changes:
- A crash fix
- A fix for an assert due to a missing mem_access ref
- Only allow a single user-fence per exec / bind.
- Some sparse warning fixes
- Two fixes for compilation failures on various odd
  combinations of gcc / arch pointed out on LKML
  (illustrated in the sketch after this list).
- Fix a fragile partial allocation pointed out on LKML.
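  A minimal, self-contained illustration of why the GuC ABI mask macros in the
  diff below gain a 'u' suffix; this is an explanatory sketch, not code from
  the patch:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * 0xffff is a signed int, so shifting a 1 into its sign bit is undefined
     * behaviour; some gcc/arch combinations reject or warn about it, and the
     * (typically negative) result sign-extends when widened to a 64-bit mask.
     * The 'u' suffix keeps the whole expression unsigned.
     */
    #define MASK_SIGNED   (0xffff << 16)
    #define MASK_UNSIGNED (0xffffu << 16)

    int main(void)
    {
            uint64_t bad  = MASK_SIGNED;   /* usually 0xffffffffffff0000 */
            uint64_t good = MASK_UNSIGNED; /* 0x00000000ffff0000 */

            printf("%llx %llx\n", (unsigned long long)bad,
                   (unsigned long long)good);
            return 0;
    }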

Cross-driver Change:
- A sysfs ABI documentation warning fix
  This also touches i915 and is acked by i915 maintainers.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZbuCYdMDVK-kAWC5@fedora
parents 419d8a93 5f16ee27
-What: /sys/devices/.../hwmon/hwmon<i>/in0_input
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/in0_input
 Date: February 2023
 KernelVersion: 6.2
 Contact: intel-gfx@lists.freedesktop.org
@@ -6,7 +6,7 @@ Description: RO. Current Voltage in millivolt.
 Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_max
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max
 Date: February 2023
 KernelVersion: 6.2
 Contact: intel-gfx@lists.freedesktop.org
@@ -20,7 +20,7 @@ Description: RW. Card reactive sustained (PL1/Tau) power limit in microwatts.
 Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_rated_max
 Date: February 2023
 KernelVersion: 6.2
 Contact: intel-gfx@lists.freedesktop.org
@@ -28,7 +28,7 @@ Description: RO. Card default power limit (default TDP setting).
 Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max_interval
 Date: February 2023
 KernelVersion: 6.2
 Contact: intel-gfx@lists.freedesktop.org
@@ -37,7 +37,7 @@ Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
 Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_crit
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_crit
 Date: February 2023
 KernelVersion: 6.2
 Contact: intel-gfx@lists.freedesktop.org
@@ -50,7 +50,7 @@ Description: RW. Card reactive critical (I1) power limit in microwatts.
 Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/curr1_crit
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/curr1_crit
 Date: February 2023
 KernelVersion: 6.2
 Contact: intel-gfx@lists.freedesktop.org
@@ -63,7 +63,7 @@ Description: RW. Card reactive critical (I1) power limit in milliamperes.
 Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/energy1_input
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/energy1_input
 Date: February 2023
 KernelVersion: 6.2
 Contact: intel-gfx@lists.freedesktop.org
......
-What: /sys/devices/.../hwmon/hwmon<i>/power1_max
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
 Date: September 2023
 KernelVersion: 6.5
 Contact: intel-xe@lists.freedesktop.org
@@ -12,7 +12,7 @@ Description: RW. Card reactive sustained (PL1) power limit in microwatts.
 Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
 Date: September 2023
 KernelVersion: 6.5
 Contact: intel-xe@lists.freedesktop.org
@@ -20,7 +20,7 @@ Description: RO. Card default power limit (default TDP setting).
 Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_crit
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_crit
 Date: September 2023
 KernelVersion: 6.5
 Contact: intel-xe@lists.freedesktop.org
@@ -33,7 +33,7 @@ Description: RW. Card reactive critical (I1) power limit in microwatts.
 Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/curr1_crit
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr1_crit
 Date: September 2023
 KernelVersion: 6.5
 Contact: intel-xe@lists.freedesktop.org
@@ -44,7 +44,7 @@ Description: RW. Card reactive critical (I1) power limit in milliamperes.
 the operating frequency if the power averaged over a window
 exceeds this limit.
-What: /sys/devices/.../hwmon/hwmon<i>/in0_input
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in0_input
 Date: September 2023
 KernelVersion: 6.5
 Contact: intel-xe@lists.freedesktop.org
@@ -52,7 +52,7 @@ Description: RO. Current Voltage in millivolt.
 Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/energy1_input
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
 Date: September 2023
 KernelVersion: 6.5
 Contact: intel-xe@lists.freedesktop.org
@@ -60,7 +60,7 @@ Description: RO. Energy input of device in microjoules.
 Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
 Date: October 2023
 KernelVersion: 6.6
 Contact: intel-xe@lists.freedesktop.org
......
@@ -50,8 +50,8 @@
 #define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
 #define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16)
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffu << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffffu << 0)
 #define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
 #define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
......
@@ -242,8 +242,8 @@ struct slpc_shared_data {
 (HOST2GUC_PC_SLPC_REQUEST_REQUEST_MSG_MIN_LEN + \
 HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
 #define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xff << 8)
-#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xff << 0)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xffu << 8)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xffu << 0)
 #define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn
 #endif
@@ -82,11 +82,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
 #define GUC_CTB_HDR_LEN 1u
 #define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
 #define GUC_CTB_MSG_MAX_LEN 256u
-#define GUC_CTB_MSG_0_FENCE (0xffff << 16)
-#define GUC_CTB_MSG_0_FORMAT (0xf << 12)
+#define GUC_CTB_MSG_0_FENCE (0xffffu << 16)
+#define GUC_CTB_MSG_0_FORMAT (0xfu << 12)
 #define GUC_CTB_FORMAT_HXG 0u
-#define GUC_CTB_MSG_0_RESERVED (0xf << 8)
-#define GUC_CTB_MSG_0_NUM_DWORDS (0xff << 0)
+#define GUC_CTB_MSG_0_RESERVED (0xfu << 8)
+#define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0)
 /**
 * DOC: CTB HXG Message
......
@@ -31,9 +31,9 @@
 */
 #define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
 /**
 * DOC: GuC Self Config KLVs
......
@@ -40,18 +40,18 @@
 */
 #define GUC_HXG_MSG_MIN_LEN 1u
-#define GUC_HXG_MSG_0_ORIGIN (0x1 << 31)
+#define GUC_HXG_MSG_0_ORIGIN (0x1u << 31)
 #define GUC_HXG_ORIGIN_HOST 0u
 #define GUC_HXG_ORIGIN_GUC 1u
-#define GUC_HXG_MSG_0_TYPE (0x7 << 28)
+#define GUC_HXG_MSG_0_TYPE (0x7u << 28)
 #define GUC_HXG_TYPE_REQUEST 0u
 #define GUC_HXG_TYPE_EVENT 1u
 #define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
 #define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
 #define GUC_HXG_TYPE_RESPONSE_FAILURE 6u
 #define GUC_HXG_TYPE_RESPONSE_SUCCESS 7u
-#define GUC_HXG_MSG_0_AUX (0xfffffff << 0)
-#define GUC_HXG_MSG_n_PAYLOAD (0xffffffff << 0)
+#define GUC_HXG_MSG_0_AUX (0xfffffffu << 0)
+#define GUC_HXG_MSG_n_PAYLOAD (0xffffffffu << 0)
 /**
 * DOC: HXG Request
@@ -85,8 +85,8 @@
 */
 #define GUC_HXG_REQUEST_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfff << 16)
-#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffff << 0)
+#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfffu << 16)
+#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffffu << 0)
 #define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
 /**
@@ -117,8 +117,8 @@
 */
 #define GUC_HXG_EVENT_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfff << 16)
-#define GUC_HXG_EVENT_MSG_0_ACTION (0xffff << 0)
+#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfffu << 16)
+#define GUC_HXG_EVENT_MSG_0_ACTION (0xffffu << 0)
 #define GUC_HXG_EVENT_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
 /**
@@ -188,8 +188,8 @@
 */
 #define GUC_HXG_FAILURE_MSG_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_FAILURE_MSG_0_HINT (0xfff << 16)
-#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffff << 0)
+#define GUC_HXG_FAILURE_MSG_0_HINT (0xfffu << 16)
+#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffffu << 0)
 /**
 * DOC: HXG Response
......
@@ -111,7 +111,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
 struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
 struct drm_exec *exec = &vm_exec.exec;
-u32 i, num_syncs = 0;
+u32 i, num_syncs = 0, num_ufence = 0;
 struct xe_sched_job *job;
 struct dma_fence *rebind_fence;
 struct xe_vm *vm;
@@ -157,6 +157,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 SYNC_PARSE_FLAG_LR_MODE : 0));
 if (err)
 goto err_syncs;
+if (xe_sync_is_ufence(&syncs[i]))
+num_ufence++;
+}
+if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+err = -EINVAL;
+goto err_syncs;
 }
 if (xe_exec_queue_is_parallel(q)) {
......
@@ -480,7 +480,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
 * to synchronize with external clients (e.g., firmware), so a semaphore
 * register will also need to be taken.
 */
-static void mcr_lock(struct xe_gt *gt)
+static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
 {
 struct xe_device *xe = gt_to_xe(gt);
 int ret = 0;
@@ -500,7 +500,7 @@ static void mcr_lock(struct xe_gt *gt)
 drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
 }
-static void mcr_unlock(struct xe_gt *gt)
+static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
 {
 /* Release hardware semaphore - this is done by writing 1 to the register */
 if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
......
@@ -165,7 +165,8 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 goto unlock_vm;
 }
-if (!xe_vma_is_userptr(vma) || !xe_vma_userptr_check_repin(vma)) {
+if (!xe_vma_is_userptr(vma) ||
+!xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
 downgrade_write(&vm->lock);
 write_locked = false;
 }
@@ -181,11 +182,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 /* TODO: Validate fault */
 if (xe_vma_is_userptr(vma) && write_locked) {
+struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 spin_lock(&vm->userptr.invalidated_lock);
-list_del_init(&vma->userptr.invalidate_link);
+list_del_init(&uvma->userptr.invalidate_link);
 spin_unlock(&vm->userptr.invalidated_lock);
-ret = xe_vma_userptr_pin_pages(vma);
+ret = xe_vma_userptr_pin_pages(uvma);
 if (ret)
 goto unlock_vm;
@@ -220,7 +223,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 dma_fence_put(fence);
 if (xe_vma_is_userptr(vma))
-ret = xe_vma_userptr_check_repin(vma);
+ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
 vma->usm.tile_invalidated &= ~BIT(tile->id);
 unlock_dma_resv:
......
@@ -963,7 +963,9 @@ void xe_guc_pc_fini(struct xe_guc_pc *pc)
 struct xe_device *xe = pc_to_xe(pc);
 if (xe->info.skip_guc_pc) {
+xe_device_mem_access_get(xe);
 xe_gt_idle_disable_c6(pc_to_gt(pc));
+xe_device_mem_access_put(xe);
 return;
 }
......
@@ -217,13 +217,13 @@ struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
 if (!fence)
 return ERR_PTR(-ENOMEM);
-dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
-ctx->dma_fence_ctx, ctx->next_seqno++);
 fence->ctx = ctx;
 fence->seqno_map = seqno_map;
 INIT_LIST_HEAD(&fence->irq_link);
+dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
+ctx->dma_fence_ctx, ctx->next_seqno++);
 trace_xe_hw_fence_create(fence);
 return fence;
......
@@ -21,10 +21,10 @@
 #include "xe_map.h"
 #include "xe_vm.h"
-#define CTX_VALID (1 << 0)
-#define CTX_PRIVILEGE (1 << 8)
-#define CTX_ADDRESSING_MODE_SHIFT 3
-#define LEGACY_64B_CONTEXT 3
+#define LRC_VALID (1 << 0)
+#define LRC_PRIVILEGE (1 << 8)
+#define LRC_ADDRESSING_MODE_SHIFT 3
+#define LRC_LEGACY_64B_CONTEXT 3
 #define ENGINE_CLASS_SHIFT 61
 #define ENGINE_INSTANCE_SHIFT 48
@@ -762,15 +762,15 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
 (q->usm.acc_notify << ACC_NOTIFY_S) |
 q->usm.acc_trigger);
-lrc->desc = CTX_VALID;
-lrc->desc |= LEGACY_64B_CONTEXT << CTX_ADDRESSING_MODE_SHIFT;
+lrc->desc = LRC_VALID;
+lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
 /* TODO: Priority */
 /* While this appears to have something about privileged batches or
 * some such, it really just means PPGTT mode.
 */
 if (vm)
-lrc->desc |= CTX_PRIVILEGE;
+lrc->desc |= LRC_PRIVILEGE;
 if (GRAPHICS_VERx100(xe) < 1250) {
 lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT;
......
@@ -618,8 +618,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
 if (!xe_vma_is_null(vma)) {
 if (xe_vma_is_userptr(vma))
-xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma),
-&curs);
+xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
+xe_vma_size(vma), &curs);
 else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
 xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
 xe_vma_size(vma), &curs);
@@ -906,17 +906,17 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
-static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
-u32 divisor = vma->userptr.divisor ? vma->userptr.divisor : 2;
+u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
 static u32 count;
 if (count++ % divisor == divisor - 1) {
-struct xe_vm *vm = xe_vma_vm(vma);
-vma->userptr.divisor = divisor << 1;
+struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+uvma->userptr.divisor = divisor << 1;
 spin_lock(&vm->userptr.invalidated_lock);
-list_move_tail(&vma->userptr.invalidate_link,
+list_move_tail(&uvma->userptr.invalidate_link,
 &vm->userptr.invalidated);
 spin_unlock(&vm->userptr.invalidated_lock);
 return true;
@@ -927,7 +927,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
 #else
-static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
 return false;
 }
@@ -1000,9 +1000,9 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
 struct xe_pt_migrate_pt_update *userptr_update =
 container_of(pt_update, typeof(*userptr_update), base);
-struct xe_vma *vma = pt_update->vma;
-unsigned long notifier_seq = vma->userptr.notifier_seq;
-struct xe_vm *vm = xe_vma_vm(vma);
+struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
+unsigned long notifier_seq = uvma->userptr.notifier_seq;
+struct xe_vm *vm = xe_vma_vm(&uvma->vma);
 int err = xe_pt_vm_dependencies(pt_update->job,
 &vm->rftree[pt_update->tile_id],
 pt_update->start,
@@ -1023,7 +1023,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 */
 do {
 down_read(&vm->userptr.notifier_lock);
-if (!mmu_interval_read_retry(&vma->userptr.notifier,
+if (!mmu_interval_read_retry(&uvma->userptr.notifier,
 notifier_seq))
 break;
@@ -1032,11 +1032,11 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 if (userptr_update->bind)
 return -EAGAIN;
-notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
+notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
 } while (true);
 /* Inject errors to test_whether they are handled correctly */
-if (userptr_update->bind && xe_pt_userptr_inject_eagain(vma)) {
+if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
 up_read(&vm->userptr.notifier_lock);
 return -EAGAIN;
 }
@@ -1297,7 +1297,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
 vma->tile_present |= BIT(tile->id);
 if (bind_pt_update.locked) {
-vma->userptr.initial_bind = true;
+to_userptr_vma(vma)->userptr.initial_bind = true;
 up_read(&vm->userptr.notifier_lock);
 xe_bo_put_commit(&deferred);
 }
@@ -1642,7 +1642,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
 if (!vma->tile_present) {
 spin_lock(&vm->userptr.invalidated_lock);
-list_del_init(&vma->userptr.invalidate_link);
+list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
 spin_unlock(&vm->userptr.invalidated_lock);
 }
 up_read(&vm->userptr.notifier_lock);
......
@@ -459,21 +459,21 @@ static size_t calc_topo_query_size(struct xe_device *xe)
 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
 }
-static void __user *copy_mask(void __user *ptr,
+static int copy_mask(void __user **ptr,
 struct drm_xe_query_topology_mask *topo,
 void *mask, size_t mask_size)
 {
 topo->num_bytes = mask_size;
-if (copy_to_user(ptr, topo, sizeof(*topo)))
-return ERR_PTR(-EFAULT);
-ptr += sizeof(topo);
-if (copy_to_user(ptr, mask, mask_size))
-return ERR_PTR(-EFAULT);
-ptr += mask_size;
-return ptr;
+if (copy_to_user(*ptr, topo, sizeof(*topo)))
+return -EFAULT;
+*ptr += sizeof(topo);
+if (copy_to_user(*ptr, mask, mask_size))
+return -EFAULT;
+*ptr += mask_size;
+return 0;
 }
 static int query_gt_topology(struct xe_device *xe,
@@ -493,28 +493,28 @@ static int query_gt_topology(struct xe_device *xe,
 }
 for_each_gt(gt, xe, id) {
+int err;
 topo.gt_id = id;
 topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
-query_ptr = copy_mask(query_ptr, &topo,
-gt->fuse_topo.g_dss_mask,
-sizeof(gt->fuse_topo.g_dss_mask));
-if (IS_ERR(query_ptr))
-return PTR_ERR(query_ptr);
+err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
+sizeof(gt->fuse_topo.g_dss_mask));
+if (err)
+return err;
 topo.type = DRM_XE_TOPO_DSS_COMPUTE;
-query_ptr = copy_mask(query_ptr, &topo,
-gt->fuse_topo.c_dss_mask,
-sizeof(gt->fuse_topo.c_dss_mask));
-if (IS_ERR(query_ptr))
-return PTR_ERR(query_ptr);
+err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
+sizeof(gt->fuse_topo.c_dss_mask));
+if (err)
+return err;
 topo.type = DRM_XE_TOPO_EU_PER_DSS;
-query_ptr = copy_mask(query_ptr, &topo,
+err = copy_mask(&query_ptr, &topo,
 gt->fuse_topo.eu_mask_per_dss,
 sizeof(gt->fuse_topo.eu_mask_per_dss));
-if (IS_ERR(query_ptr))
-return PTR_ERR(query_ptr);
+if (err)
+return err;
 }
 return 0;
......
@@ -33,4 +33,9 @@ struct dma_fence *
 xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
 struct xe_exec_queue *q, struct xe_vm *vm);
+static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
+{
+return !!sync->ufence;
+}
 #endif
@@ -46,7 +46,7 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 /**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
-* @vma: The userptr vma
+* @uvma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and can the function can be called
@@ -56,15 +56,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 *
 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
 */
-int xe_vma_userptr_check_repin(struct xe_vma *vma)
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
 {
-return mmu_interval_check_retry(&vma->userptr.notifier,
-vma->userptr.notifier_seq) ?
+return mmu_interval_check_retry(&uvma->userptr.notifier,
+uvma->userptr.notifier_seq) ?
 -EAGAIN : 0;
 }
-int xe_vma_userptr_pin_pages(struct xe_vma *vma)
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
 {
+struct xe_userptr *userptr = &uvma->userptr;
+struct xe_vma *vma = &uvma->vma;
 struct xe_vm *vm = xe_vma_vm(vma);
 struct xe_device *xe = vm->xe;
 const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
@@ -80,30 +82,30 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 if (vma->gpuva.flags & XE_VMA_DESTROYED)
 return 0;
-notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
-if (notifier_seq == vma->userptr.notifier_seq)
+notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+if (notifier_seq == userptr->notifier_seq)
 return 0;
 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
 if (!pages)
 return -ENOMEM;
-if (vma->userptr.sg) {
+if (userptr->sg) {
 dma_unmap_sgtable(xe->drm.dev,
-vma->userptr.sg,
+userptr->sg,
 read_only ? DMA_TO_DEVICE :
 DMA_BIDIRECTIONAL, 0);
-sg_free_table(vma->userptr.sg);
-vma->userptr.sg = NULL;
+sg_free_table(userptr->sg);
+userptr->sg = NULL;
 }
 pinned = ret = 0;
 if (in_kthread) {
-if (!mmget_not_zero(vma->userptr.notifier.mm)) {
+if (!mmget_not_zero(userptr->notifier.mm)) {
 ret = -EFAULT;
 goto mm_closed;
 }
-kthread_use_mm(vma->userptr.notifier.mm);
+kthread_use_mm(userptr->notifier.mm);
 }
 while (pinned < num_pages) {
@@ -123,32 +125,32 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 }
 if (in_kthread) {
-kthread_unuse_mm(vma->userptr.notifier.mm);
-mmput(vma->userptr.notifier.mm);
+kthread_unuse_mm(userptr->notifier.mm);
+mmput(userptr->notifier.mm);
 }
 mm_closed:
 if (ret)
 goto out;
-ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
 pinned, 0,
 (u64)pinned << PAGE_SHIFT,
 xe_sg_segment_size(xe->drm.dev),
 GFP_KERNEL);
 if (ret) {
-vma->userptr.sg = NULL;
+userptr->sg = NULL;
 goto out;
 }
-vma->userptr.sg = &vma->userptr.sgt;
-ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
+userptr->sg = &userptr->sgt;
+ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
 read_only ? DMA_TO_DEVICE :
 DMA_BIDIRECTIONAL,
 DMA_ATTR_SKIP_CPU_SYNC |
 DMA_ATTR_NO_KERNEL_MAPPING);
 if (ret) {
-sg_free_table(vma->userptr.sg);
-vma->userptr.sg = NULL;
+sg_free_table(userptr->sg);
+userptr->sg = NULL;
 goto out;
 }
@@ -167,8 +169,8 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 kvfree(pages);
 if (!(ret < 0)) {
-vma->userptr.notifier_seq = notifier_seq;
-if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
+userptr->notifier_seq = notifier_seq;
+if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
 goto retry;
 }
@@ -635,7 +637,9 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
 const struct mmu_notifier_range *range,
 unsigned long cur_seq)
 {
-struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
+struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
+struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+struct xe_vma *vma = &uvma->vma;
 struct xe_vm *vm = xe_vma_vm(vma);
 struct dma_resv_iter cursor;
 struct dma_fence *fence;
@@ -651,7 +655,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
 mmu_interval_set_seq(mni, cur_seq);
 /* No need to stop gpu access if the userptr is not yet bound. */
-if (!vma->userptr.initial_bind) {
+if (!userptr->initial_bind) {
 up_write(&vm->userptr.notifier_lock);
 return true;
 }
@@ -663,7 +667,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
 if (!xe_vm_in_fault_mode(vm) &&
 !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
 spin_lock(&vm->userptr.invalidated_lock);
-list_move_tail(&vma->userptr.invalidate_link,
+list_move_tail(&userptr->invalidate_link,
 &vm->userptr.invalidated);
 spin_unlock(&vm->userptr.invalidated_lock);
 }
@@ -703,7 +707,7 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
 int xe_vm_userptr_pin(struct xe_vm *vm)
 {
-struct xe_vma *vma, *next;
+struct xe_userptr_vma *uvma, *next;
 int err = 0;
 LIST_HEAD(tmp_evict);
@@ -711,22 +715,23 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
 /* Collect invalidated userptrs */
 spin_lock(&vm->userptr.invalidated_lock);
-list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
+list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
 userptr.invalidate_link) {
-list_del_init(&vma->userptr.invalidate_link);
-list_move_tail(&vma->combined_links.userptr,
+list_del_init(&uvma->userptr.invalidate_link);
+list_move_tail(&uvma->userptr.repin_link,
 &vm->userptr.repin_list);
 }
 spin_unlock(&vm->userptr.invalidated_lock);
 /* Pin and move to temporary list */
-list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
-combined_links.userptr) {
-err = xe_vma_userptr_pin_pages(vma);
+list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+userptr.repin_link) {
+err = xe_vma_userptr_pin_pages(uvma);
 if (err < 0)
 return err;
-list_move_tail(&vma->combined_links.userptr, &vm->rebind_list);
+list_del_init(&uvma->userptr.repin_link);
+list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
 }
 return 0;
@@ -782,6 +787,14 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 return fence;
 }
+static void xe_vma_free(struct xe_vma *vma)
+{
+if (xe_vma_is_userptr(vma))
+kfree(to_userptr_vma(vma));
+else
+kfree(vma);
+}
 #define VMA_CREATE_FLAG_READ_ONLY BIT(0)
 #define VMA_CREATE_FLAG_IS_NULL BIT(1)
@@ -800,14 +813,26 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 xe_assert(vm->xe, start < end);
 xe_assert(vm->xe, end < vm->size);
-if (!bo && !is_null) /* userptr */
+/*
+* Allocate and ensure that the xe_vma_is_userptr() return
+* matches what was allocated.
+*/
+if (!bo && !is_null) {
+struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
+if (!uvma)
+return ERR_PTR(-ENOMEM);
+vma = &uvma->vma;
+} else {
 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-else
-vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
-GFP_KERNEL);
-if (!vma) {
-vma = ERR_PTR(-ENOMEM);
-return vma;
+if (!vma)
+return ERR_PTR(-ENOMEM);
+if (is_null)
+vma->gpuva.flags |= DRM_GPUVA_SPARSE;
+if (bo)
+vma->gpuva.gem.obj = &bo->ttm.base;
 }
 INIT_LIST_HEAD(&vma->combined_links.rebind);
@@ -818,8 +843,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 vma->gpuva.va.range = end - start + 1;
 if (read_only)
 vma->gpuva.flags |= XE_VMA_READ_ONLY;
-if (is_null)
-vma->gpuva.flags |= DRM_GPUVA_SPARSE;
 for_each_tile(tile, vm->xe, id)
 vma->tile_mask |= 0x1 << id;
@@ -836,35 +859,35 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
 if (IS_ERR(vm_bo)) {
-kfree(vma);
+xe_vma_free(vma);
 return ERR_CAST(vm_bo);
 }
 drm_gpuvm_bo_extobj_add(vm_bo);
 drm_gem_object_get(&bo->ttm.base);
-vma->gpuva.gem.obj = &bo->ttm.base;
 vma->gpuva.gem.offset = bo_offset_or_userptr;
 drm_gpuva_link(&vma->gpuva, vm_bo);
 drm_gpuvm_bo_put(vm_bo);
 } else /* userptr or null */ {
 if (!is_null) {
+struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
 u64 size = end - start + 1;
 int err;
-INIT_LIST_HEAD(&vma->userptr.invalidate_link);
+INIT_LIST_HEAD(&userptr->invalidate_link);
+INIT_LIST_HEAD(&userptr->repin_link);
 vma->gpuva.gem.offset = bo_offset_or_userptr;
-err = mmu_interval_notifier_insert(&vma->userptr.notifier,
+err = mmu_interval_notifier_insert(&userptr->notifier,
 current->mm,
 xe_vma_userptr(vma), size,
 &vma_userptr_notifier_ops);
 if (err) {
-kfree(vma);
-vma = ERR_PTR(err);
-return vma;
+xe_vma_free(vma);
+return ERR_PTR(err);
 }
-vma->userptr.notifier_seq = LONG_MAX;
+userptr->notifier_seq = LONG_MAX;
 }
 xe_vm_get(vm);
@@ -880,13 +903,15 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 bool read_only = xe_vma_read_only(vma);
 if (xe_vma_is_userptr(vma)) {
-if (vma->userptr.sg) {
+struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+if (userptr->sg) {
 dma_unmap_sgtable(xe->drm.dev,
-vma->userptr.sg,
+userptr->sg,
 read_only ? DMA_TO_DEVICE :
 DMA_BIDIRECTIONAL, 0);
-sg_free_table(vma->userptr.sg);
-vma->userptr.sg = NULL;
+sg_free_table(userptr->sg);
+userptr->sg = NULL;
 }
 /*
@@ -894,7 +919,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 * the notifer until we're sure the GPU is not accessing
 * them anymore
 */
-mmu_interval_notifier_remove(&vma->userptr.notifier);
+mmu_interval_notifier_remove(&userptr->notifier);
 xe_vm_put(vm);
 } else if (xe_vma_is_null(vma)) {
 xe_vm_put(vm);
@@ -902,7 +927,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 xe_bo_put(xe_vma_bo(vma));
 }
-kfree(vma);
+xe_vma_free(vma);
 }
 static void vma_destroy_work_func(struct work_struct *w)
@@ -933,7 +958,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
 spin_lock(&vm->userptr.invalidated_lock);
-list_del(&vma->userptr.invalidate_link);
+list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
 spin_unlock(&vm->userptr.invalidated_lock);
 } else if (!xe_vma_is_null(vma)) {
 xe_bo_assert_held(xe_vma_bo(vma));
@@ -2150,7 +2175,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 drm_exec_fini(&exec);
 if (xe_vma_is_userptr(vma)) {
-err = xe_vma_userptr_pin_pages(vma);
+err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
 if (err) {
 prep_vma_destroy(vm, vma, false);
 xe_vma_destroy_unlocked(vma);
@@ -2507,7 +2532,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
 lockdep_assert_held_write(&vm->lock);
-err = xe_vma_userptr_pin_pages(vma);
+err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
 if (!err)
 goto retry_userptr;
@@ -2851,7 +2876,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 struct drm_gpuva_ops **ops = NULL;
 struct xe_vm *vm;
 struct xe_exec_queue *q = NULL;
-u32 num_syncs;
+u32 num_syncs, num_ufence = 0;
 struct xe_sync_entry *syncs = NULL;
 struct drm_xe_vm_bind_op *bind_ops;
 LIST_HEAD(ops_list);
@@ -2988,6 +3013,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
 if (err)
 goto free_syncs;
+if (xe_sync_is_ufence(&syncs[num_syncs]))
+num_ufence++;
+}
+if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+err = -EINVAL;
+goto free_syncs;
 }
 if (!args->num_binds) {
@@ -3130,8 +3163,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
 if (xe_vma_is_userptr(vma)) {
 WARN_ON_ONCE(!mmu_interval_check_retry
-(&vma->userptr.notifier,
-vma->userptr.notifier_seq));
+(&to_userptr_vma(vma)->userptr.notifier,
+to_userptr_vma(vma)->userptr.notifier_seq));
 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
 DMA_RESV_USAGE_BOOKKEEP));
@@ -3192,11 +3225,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
 if (is_null) {
 addr = 0;
 } else if (is_userptr) {
+struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
 struct xe_res_cursor cur;
-if (vma->userptr.sg) {
-xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
-&cur);
+if (sg) {
+xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
 addr = xe_res_dma(&cur);
 } else {
 addr = 0;
......
@@ -160,6 +160,18 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
 return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
 }
+/**
+* to_userptr_vma() - Return a pointer to an embedding userptr vma
+* @vma: Pointer to the embedded struct xe_vma
+*
+* Return: Pointer to the embedding userptr vma
+*/
+static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
+{
+xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
+return container_of(vma, struct xe_userptr_vma, vma);
+}
 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -224,9 +236,9 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
 }
 }
-int xe_vma_userptr_pin_pages(struct xe_vma *vma);
-int xe_vma_userptr_check_repin(struct xe_vma *vma);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
......
@@ -37,6 +37,8 @@ struct xe_vm;
 struct xe_userptr {
 /** @invalidate_link: Link for the vm::userptr.invalidated list */
 struct list_head invalidate_link;
+/** @userptr: link into VM repin list if userptr. */
+struct list_head repin_link;
 /**
 * @notifier: MMU notifier for user pointer (invalidation call back)
 */
@@ -68,8 +70,6 @@ struct xe_vma {
 * resv.
 */
 union {
-/** @userptr: link into VM repin list if userptr. */
-struct list_head userptr;
 /** @rebind: link into VM if this VMA needs rebinding. */
 struct list_head rebind;
 /** @destroy: link to contested list when VM is being closed. */
@@ -105,11 +105,15 @@ struct xe_vma {
 * @pat_index: The pat index to use when encoding the PTEs for this vma.
 */
 u16 pat_index;
+};
 /**
-* @userptr: user pointer state, only allocated for VMAs that are
-* user pointers
+* struct xe_userptr_vma - A userptr vma subclass
+* @vma: The vma.
+* @userptr: Additional userptr information.
 */
+struct xe_userptr_vma {
+struct xe_vma vma;
 struct xe_userptr userptr;
 };
......