Commit 63412a5a authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Change tile masks from u64 to u8

This will save us a few bytes in the xe_vma structure (see the layout sketch below the commit header).

v2: Use hweight8 rather than hweight_long (Rodrigo)
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 3daf694c
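
[Editor's note] The savings come from narrowing three 8-byte masks to single bytes that can pack into one aligned slot. A minimal userspace sketch of the effect, using stand-in layouts rather than the real struct xe_vma:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in layouts, not the real struct xe_vma: three 8-byte
     * masks versus three 1-byte masks sharing a single 8-byte slot.
     */
    struct vma_wide {
            void *gpuva;                    /* stand-in for struct drm_gpuva */
            uint64_t tile_mask;
            uint64_t tile_present;
            uint64_t tile_invalidated;
    };

    struct vma_narrow {
            void *gpuva;
            uint8_t tile_mask;
            uint8_t tile_present;
            uint8_t tile_invalidated;
    };

    int main(void)
    {
            printf("wide:   %zu bytes\n", sizeof(struct vma_wide));   /* 32 on LP64 */
            printf("narrow: %zu bytes\n", sizeof(struct vma_narrow)); /* 16 on LP64 */
            return 0;
    }

A u8 is wide enough because the mask holds at most one bit per tile; the diff below also moves tile_mask and tile_present next to the other byte-sized state so the bytes pack together.
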
@@ -871,7 +871,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 				    u64 start, u64 end,
 				    bool read_only,
 				    bool is_null,
-				    u64 tile_mask)
+				    u8 tile_mask)
 {
 	struct xe_vma *vma;
 	struct xe_tile *tile;
@@ -1579,7 +1579,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
 	struct dma_fence_array *cf = NULL;
 	struct xe_vm *vm = xe_vma_vm(vma);
 	int cur_fence = 0, i;
-	int number_tiles = hweight_long(vma->tile_present);
+	int number_tiles = hweight8(vma->tile_present);
 	int err;
 	u8 id;
@@ -1654,7 +1654,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
 	struct dma_fence_array *cf = NULL;
 	struct xe_vm *vm = xe_vma_vm(vma);
 	int cur_fence = 0, i;
-	int number_tiles = hweight_long(vma->tile_mask);
+	int number_tiles = hweight8(vma->tile_mask);
 	int err;
 	u8 id;
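
[Editor's note] Both hunks above derive number_tiles from the population count of a mask; with the field now a u8, hweight8() is the matching-width helper (hweight_long() counts bits in an unsigned long). A minimal userspace sketch, using a compiler builtin as a stand-in for the kernel's hweight8() from <linux/bitops.h>:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's hweight8(); the builtin is an
     * assumption here, not necessarily what the kernel uses.
     */
    static unsigned int hweight8_sketch(uint8_t w)
    {
            return (unsigned int)__builtin_popcount(w);
    }

    int main(void)
    {
            uint8_t tile_mask = 0x05;       /* bits 0 and 2: two tiles bound */

            /* Mirrors how number_tiles is computed above: one fence
             * per set bit in the VMA's tile mask.
             */
            printf("number_tiles = %u\n", hweight8_sketch(tile_mask));
            return 0;
    }
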
@@ -2250,7 +2250,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
 static struct drm_gpuva_ops *
 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
-			 u32 operation, u64 tile_mask, u32 region)
+			 u32 operation, u8 tile_mask, u32 region)
 {
 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
 	struct ww_acquire_ctx ww;
@@ -2354,7 +2354,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 }
 
 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
-			      u64 tile_mask, bool read_only, bool is_null)
+			      u8 tile_mask, bool read_only, bool is_null)
 {
 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
 	struct xe_vma *vma;
@@ -3339,7 +3339,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		u64 addr = bind_ops[i].addr;
 		u32 op = bind_ops[i].op;
 		u64 obj_offset = bind_ops[i].obj_offset;
-		u64 tile_mask = bind_ops[i].tile_mask;
+		u8 tile_mask = bind_ops[i].tile_mask;
 		u32 region = bind_ops[i].region;
 
 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
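
[Editor's note] One subtlety in the hunk above: bind_ops[i].tile_mask comes from the UAPI bind-op struct, which is wider than u8 (the old code read it into a u64), so the assignment now truncates any bits beyond the low eight. A hedged sketch of the kind of range check a caller would need, as a hypothetical helper that is not part of this commit:

    #include <errno.h>
    #include <stdint.h>

    /* Hypothetical check, not from this commit: reject bind ops
     * whose tile_mask sets bits a u8 cannot carry.
     */
    static int check_tile_mask(uint64_t uapi_tile_mask)
    {
            if (uapi_tile_mask & ~(uint64_t)UINT8_MAX)
                    return -EINVAL; /* more than 8 tiles requested */
            return 0;
    }
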
...
@@ -38,18 +38,6 @@ struct xe_vma {
 	/** @gpuva: Base GPUVA object */
 	struct drm_gpuva gpuva;
 
-	/** @tile_mask: Tile mask of where to create binding for this VMA */
-	u64 tile_mask;
-
-	/**
-	 * @tile_present: GT mask of binding are present for this VMA.
-	 * protected by vm->lock, vm->resv and for userptrs,
-	 * vm->userptr.notifier_lock for writing. Needs either for reading,
-	 * but if reading is done under the vm->lock only, it needs to be held
-	 * in write mode.
-	 */
-	u64 tile_present;
-
 	/** @combined_links: links into lists which are mutually exclusive */
 	union {
 		/**
@@ -107,9 +95,21 @@ struct xe_vma {
 	/** @usm: unified shared memory state */
 	struct {
 		/** @tile_invalidated: VMA has been invalidated */
-		u64 tile_invalidated;
+		u8 tile_invalidated;
 	} usm;
 
+	/** @tile_mask: Tile mask of where to create binding for this VMA */
+	u8 tile_mask;
+
+	/**
+	 * @tile_present: GT mask of binding are present for this VMA.
+	 * protected by vm->lock, vm->resv and for userptrs,
+	 * vm->userptr.notifier_lock for writing. Needs either for reading,
+	 * but if reading is done under the vm->lock only, it needs to be held
+	 * in write mode.
+	 */
+	u8 tile_present;
+
 	struct {
 		struct list_head rebind_link;
 	} notifier;
@@ -395,7 +395,7 @@ struct xe_vma_op {
 	 */
 	struct async_op_fence *fence;
 	/** @tile_mask: gt mask for this operation */
-	u64 tile_mask;
+	u8 tile_mask;
 	/** @flags: operation flags */
 	enum xe_vma_op_flags flags;
...