Commit 80af1f5b authored by Dave Airlie

Merge tag 'drm-xe-fixes-2024-03-26' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

- Fix build on mips
- Fix wrong bound checks
- Fix use of msec rather than jiffies
- Remove dead code
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/a47jbz45nry4gjmtyresaraakwnasgngncltmrshbfkx25mhzu@bvay7j3ed7ir
parents 4cece764 0d8cf0c9
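
For context on the "Fix wrong bound checks" item: the hunks below in xe_device.h, xe_exec_queue.c and xe_query.c are all the same off-by-one, where an index equal to the array/limit size slipped past a ">" guard. A minimal standalone C sketch of the bug class (illustrative names, not xe code):

/*
 * Standalone sketch (not xe code) of the off-by-one: for an array of
 * N entries, index N is already out of bounds, so the guard must use
 * ">=", not ">".
 */
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const char * const names[] = { "render", "copy", "video" };

static const char *lookup(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(names))	/* ">" would let idx == 3 through */
		return NULL;
	return names[idx];
}

int main(void)
{
	const char *a = lookup(2), *b = lookup(3);

	printf("idx 2: %s\n", a ? a : "(rejected)");
	printf("idx 3: %s\n", b ? b : "(rejected)");
	return 0;
}

With ">" the call lookup(3) would read one entry past the end of names[]; ">=" rejects it.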
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -144,9 +144,6 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
 			.mem_type = XE_PL_TT,
 		};
 		*c += 1;
-
-		if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
-			bo->props.preferred_mem_type = XE_PL_TT;
 	}
 }
 
@@ -181,25 +178,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 	}
 	places[*c] = place;
 	*c += 1;
-
-	if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
-		bo->props.preferred_mem_type = mem_type;
 }
 
 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
 			 u32 bo_flags, u32 *c)
 {
-	if (bo->props.preferred_gt == XE_GT1) {
-		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
-			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
-		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
-			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
-	} else {
-		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
-			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
-		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
-			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
-	}
+	if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
+		add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
+	if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
+		add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
 }
 
 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
@@ -223,17 +210,8 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 {
 	u32 c = 0;
 
-	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
-
-	/* The order of placements should indicate preferred location */
-
-	if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
-		try_add_system(xe, bo, bo_flags, &c);
-		try_add_vram(xe, bo, bo_flags, &c);
-	} else {
-		try_add_vram(xe, bo, bo_flags, &c);
-		try_add_system(xe, bo, bo_flags, &c);
-	}
+	try_add_vram(xe, bo, bo_flags, &c);
+	try_add_system(xe, bo, bo_flags, &c);
 	try_add_stolen(xe, bo, bo_flags, &c);
 
 	if (!c)
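
Note on the hunk above: the removed comment said the order of placements should indicate the preferred location, and that is exactly what the new fixed order (VRAM, then system TT, then stolen) relies on, since TTM tries a BO's placement array in order. A standalone sketch of that idea, assuming first-fit-in-order semantics (illustrative names, not the xe/TTM API):

#include <stdio.h>

enum mem_type { PL_VRAM0, PL_TT, PL_STOLEN };

static const char * const type_name[] = { "vram0", "tt", "stolen" };

/* Walk the placement array in order; the first type that fits wins,
 * so index 0 is the most-preferred location. */
static int pick_placement(const enum mem_type *places, unsigned int n,
			  int (*fits)(enum mem_type))
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (fits(places[i]))
			return places[i];
	return -1;
}

/* Simulate VRAM being exhausted: everything but VRAM fits. */
static int fits_when_vram_full(enum mem_type t)
{
	return t != PL_VRAM0;
}

int main(void)
{
	enum mem_type places[] = { PL_VRAM0, PL_TT, PL_STOLEN };
	int picked = pick_placement(places, 3, fits_when_vram_full);

	printf("placed in: %s\n", picked < 0 ? "(nowhere)" : type_name[picked]);
	return 0;
}

With VRAM simulated as full, the walk falls back to "tt", mirroring how the fixed ordering behaves under memory pressure.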
@@ -1126,13 +1104,6 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
 	}
 }
 
-static bool should_migrate_to_system(struct xe_bo *bo)
-{
-	struct xe_device *xe = xe_bo_device(bo);
-
-	return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
-}
-
 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
@@ -1141,7 +1112,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 	struct xe_bo *bo = ttm_to_xe_bo(tbo);
 	bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
 	vm_fault_t ret;
-	int idx, r = 0;
+	int idx;
 
 	if (needs_rpm)
 		xe_device_mem_access_get(xe);
@@ -1153,17 +1124,8 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 	if (drm_dev_enter(ddev, &idx)) {
 		trace_xe_bo_cpu_fault(bo);
 
-		if (should_migrate_to_system(bo)) {
-			r = xe_bo_migrate(bo, XE_PL_TT);
-			if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
-				ret = VM_FAULT_NOPAGE;
-			else if (r)
-				ret = VM_FAULT_SIGBUS;
-		}
-		if (!ret)
-			ret = ttm_bo_vm_fault_reserved(vmf,
-						       vmf->vma->vm_page_prot,
-						       TTM_BO_VM_NUM_PREFAULT);
+		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+					       TTM_BO_VM_NUM_PREFAULT);
 		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
@@ -1291,9 +1253,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 	bo->flags = flags;
 	bo->cpu_caching = cpu_caching;
 	bo->ttm.base.funcs = &xe_gem_object_funcs;
-	bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
-	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
-	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
 	bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
 	INIT_LIST_HEAD(&bo->pinned_link);
 #ifdef CONFIG_PROC_FS
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -56,25 +56,6 @@ struct xe_bo {
 	 */
 	struct list_head client_link;
 #endif
-	/** @props: BO user controlled properties */
-	struct {
-		/** @preferred_mem: preferred memory class for this BO */
-		s16 preferred_mem_class;
-		/** @prefered_gt: preferred GT for this BO */
-		s16 preferred_gt;
-		/** @preferred_mem_type: preferred memory type */
-		s32 preferred_mem_type;
-		/**
-		 * @cpu_atomic: the CPU expects to do atomics operations to
-		 * this BO
-		 */
-		bool cpu_atomic;
-		/**
-		 * @device_atomic: the device expects to do atomics operations
-		 * to this BO
-		 */
-		bool device_atomic;
-	} props;
 	/** @freed: List node for delayed put. */
 	struct llist_node freed;
 	/** @created: Whether the bo has passed initial creation */
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -58,7 +58,7 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
 
 static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
 {
-	if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id > XE_MAX_GT_PER_TILE))
+	if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
 		gt_id = 0;
 
 	return gt_id ? tile->media_gt : tile->primary_gt;
@@ -79,7 +79,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
 	if (MEDIA_VER(xe) >= 13) {
 		gt = xe_tile_get_gt(root_tile, gt_id);
 	} else {
-		if (drm_WARN_ON(&xe->drm, gt_id > XE_MAX_TILES_PER_DEVICE))
+		if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
 			gt_id = 0;
 
 		gt = xe->tiles[gt_id].primary_gt;
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -448,7 +448,7 @@ find_hw_engine(struct xe_device *xe,
 {
 	u32 idx;
 
-	if (eci.engine_class > ARRAY_SIZE(user_to_xe_engine_class))
+	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
 		return NULL;
 
 	if (eci.gt_id >= xe->info.gt_count)
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1220,7 +1220,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	init_waitqueue_head(&ge->suspend_wait);
 
 	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
-		  q->sched_props.job_timeout_ms;
+		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
 			    get_submit_wq(guc),
 			    q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
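
This is the "msec rather than jiffies" fix: the drm scheduler consumes its job timeout in jiffies, while sched_props.job_timeout_ms is in milliseconds, so the raw value was off by a factor of HZ/1000. A standalone sketch of the unit mismatch (the HZ value and helper are illustrative; the kernel's msecs_to_jiffies() rounds up similarly but via per-HZ-config formulas):

#include <stdio.h>

#define HZ 250	/* example tick rate; kernel configs commonly use 100-1000 */

/* Rough userspace analogue of msecs_to_jiffies(): convert milliseconds
 * to timer ticks, rounding up so short timeouts don't truncate to 0. */
static unsigned long msecs_to_ticks(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;
}

int main(void)
{
	unsigned long timeout_ms = 5000;

	/* Passing the raw value treats 5000 ms as 5000 ticks... */
	printf("unconverted: %lu ticks = %.1f s\n",
	       timeout_ms, (double)timeout_ms / HZ);
	/* ...instead of the intended 5 seconds. */
	printf("converted:   %lu ticks = %.1f s\n",
	       msecs_to_ticks(timeout_ms),
	       (double)msecs_to_ticks(timeout_ms) / HZ);
	return 0;
}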
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -97,7 +97,6 @@ static void set_offsets(u32 *regs,
 #define REG16(x) \
 	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
 	(((x) >> 2) & 0x7f)
-#define END 0
 {
 	const u32 base = hwe->mmio_base;
 
@@ -168,7 +167,7 @@ static const u8 gen12_xcs_offsets[] = {
 	REG16(0x274),
 	REG16(0x270),
 
-	END
+	0
 };
 
 static const u8 dg2_xcs_offsets[] = {
@@ -202,7 +201,7 @@ static const u8 dg2_xcs_offsets[] = {
 	REG16(0x274),
 	REG16(0x270),
 
-	END
+	0
 };
 
 static const u8 gen12_rcs_offsets[] = {
@@ -298,7 +297,7 @@ static const u8 gen12_rcs_offsets[] = {
 	REG(0x084),
 	NOP(1),
 
-	END
+	0
 };
 
 static const u8 xehp_rcs_offsets[] = {
@@ -339,7 +338,7 @@ static const u8 xehp_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END
+	0
 };
 
 static const u8 dg2_rcs_offsets[] = {
@@ -382,7 +381,7 @@ static const u8 dg2_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END
+	0
 };
 
 static const u8 mtl_rcs_offsets[] = {
@@ -425,7 +424,7 @@ static const u8 mtl_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END
+	0
 };
 
 #define XE2_CTX_COMMON \
@@ -471,7 +470,7 @@ static const u8 xe2_rcs_offsets[] = {
 	LRI(1, 0),		/* [0x47] */
 	REG(0x0c8),		/* [0x48] R_PWR_CLK_STATE */
 
-	END
+	0
 };
 
 static const u8 xe2_bcs_offsets[] = {
@@ -482,16 +481,15 @@ static const u8 xe2_bcs_offsets[] = {
 	REG16(0x200),		/* [0x42] BCS_SWCTRL */
 	REG16(0x204),		/* [0x44] BLIT_CCTL */
 
-	END
+	0
 };
 
 static const u8 xe2_xcs_offsets[] = {
 	XE2_CTX_COMMON,
 
-	END
+	0
 };
 
-#undef END
 #undef REG16
 #undef REG
 #undef LRI
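
The END churn above is the "Fix build on mips" item: xe_lrc.c defined its own END macro as the offset-table terminator, which collides with a macro of the same name pulled in through the mips arch headers, so the tables now spell the terminator as a literal 0 and the #define/#undef pair is dropped. A standalone sketch of the collision class (the value 42 and header layout are illustrative):

#include <stdio.h>

/* Stand-in for a definition pulled in from an arch header. */
#define END 42

/*
 * A driver-local "#define END 0" at this point would redefine the
 * macro (a warning, or an error under -Werror). Spelling the table
 * terminator as a literal 0 avoids owning a macro named END at all.
 */
static const unsigned char offsets[] = {
	0x11,
	0x22,
	0	/* terminator, previously spelled END */
};

int main(void)
{
	printf("arch END = %d, table terminator = %d\n", END, offsets[2]);
	return 0;
}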
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -132,7 +132,7 @@ query_engine_cycles(struct xe_device *xe,
 		return -EINVAL;
 
 	eci = &resp.eci;
-	if (eci->gt_id > XE_MAX_GT_PER_TILE)
+	if (eci->gt_id >= XE_MAX_GT_PER_TILE)
 		return -EINVAL;
 
 	gt = xe_device_get_gt(xe, eci->gt_id);