Commit c0a51fd0 authored by Christian König, committed by Chris Wilson

drm: move read_domains and write_domain into i915

i915 is the only driver that uses these fields in the drm_gem_object
structure, so for every other driver they are just wasted memory.

Move the fields into drm_i915_gem_object instead and patch the i915 code
with the following sed commands:

sed -i "s/obj->base.read_domains/obj->read_domains/g" drivers/gpu/drm/i915/*.c drivers/gpu/drm/i915/*/*.c
sed -i "s/obj->base.write_domain/obj->write_domain/g" drivers/gpu/drm/i915/*.c drivers/gpu/drm/i915/*/*.c

The change is compile-tested only.
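
For illustration (an editor's sketch, not part of the patch): the sed
commands above rewrite every access so that it no longer hops through the
embedded base object. The struct excerpts below are abbreviated, not the
full definitions.

	/* Before: the generic GEM object (include/drm/drm_gem.h) carried
	 * the domain tracking, so every DRM driver paid for it:
	 */
	struct drm_gem_object {
		/* ... */
		uint32_t read_domains;
		uint32_t write_domain;
	};

	/* i915 reached the fields through its embedded base object: */
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	/* After: the fields are i915-private, shrunk to u16 and placed
	 * next to other small members (the v2 "move fields around"):
	 */
	struct drm_i915_gem_object {
		struct drm_gem_object base;
		/* ... */
		u16 read_domains;
		u16 write_domain;
	};

	/* Every access drops the ->base hop: */
	obj->read_domains = I915_GEM_DOMAIN_CPU;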

v2: move fields around as suggested by Chris.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180216124338.9087-1-christian.koenig@amd.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent e1039626
@@ -162,8 +162,8 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 			info->size << PAGE_SHIFT);
 	i915_gem_object_init(obj, &intel_vgpu_gem_ops);
 
-	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
-	obj->base.write_domain = 0;
+	obj->read_domains = I915_GEM_DOMAIN_GTT;
+	obj->write_domain = 0;
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		unsigned int tiling_mode = 0;
 		unsigned int stride = 0;
...
@@ -150,8 +150,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   get_global_flag(obj),
 		   get_pin_mapped_flag(obj),
 		   obj->base.size / 1024,
-		   obj->base.read_domains,
-		   obj->base.write_domain,
+		   obj->read_domains,
+		   obj->write_domain,
 		   i915_cache_level_str(dev_priv, obj->cache_level),
 		   obj->mm.dirty ? " dirty" : "",
 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
...
@@ -240,8 +240,8 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
 static void __start_cpu_write(struct drm_i915_gem_object *obj)
 {
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	if (cpu_write_needs_clflush(obj))
 		obj->cache_dirty = true;
 }
@@ -257,7 +257,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 	obj->mm.dirty = false;
 
 	if (needs_clflush &&
-	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
 	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
 		drm_clflush_sg(pages);
@@ -703,10 +703,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 
-	if (!(obj->base.write_domain & flush_domains))
+	if (!(obj->write_domain & flush_domains))
 		return;
 
-	switch (obj->base.write_domain) {
+	switch (obj->write_domain) {
 	case I915_GEM_DOMAIN_GTT:
 		i915_gem_flush_ggtt_writes(dev_priv);
@@ -731,7 +731,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 		break;
 	}
 
-	obj->base.write_domain = 0;
+	obj->write_domain = 0;
 }
 
 static inline int
@@ -831,7 +831,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	 * anyway again before the next pread happens.
 	 */
 	if (!obj->cache_dirty &&
-	    !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+	    !(obj->read_domains & I915_GEM_DOMAIN_CPU))
 		*needs_clflush = CLFLUSH_BEFORE;
 
 out:
@@ -890,7 +890,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		 * Same trick applies to invalidate partially written
 		 * cachelines read before writing.
 		 */
-		if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+		if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
 			*needs_clflush |= CLFLUSH_BEFORE;
 	}
@@ -2391,8 +2391,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
 	 * a GPU cache
 	 */
-	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
@@ -3703,7 +3703,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 	if (obj->cache_dirty)
 		i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
-	obj->base.write_domain = 0;
+	obj->write_domain = 0;
 }
 
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
@@ -3740,7 +3740,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+	if (obj->write_domain == I915_GEM_DOMAIN_WC)
 		return 0;
 
 	/* Flush and acquire obj->pages so that we are coherent through
@@ -3761,17 +3761,17 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 	 * coherent writes from the GPU, by effectively invalidating the
 	 * WC domain upon first access.
 	 */
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+	if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
 		mb();
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
-	obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_WC;
 	if (write) {
-		obj->base.read_domains = I915_GEM_DOMAIN_WC;
-		obj->base.write_domain = I915_GEM_DOMAIN_WC;
+		obj->read_domains = I915_GEM_DOMAIN_WC;
+		obj->write_domain = I915_GEM_DOMAIN_WC;
 		obj->mm.dirty = true;
 	}
@@ -3803,7 +3803,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+	if (obj->write_domain == I915_GEM_DOMAIN_GTT)
 		return 0;
 
 	/* Flush and acquire obj->pages so that we are coherent through
@@ -3824,17 +3824,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	 * coherent writes from the GPU, by effectively invalidating the
 	 * GTT domain upon first access.
 	 */
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+	if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
 		mb();
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
-		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
-		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+		obj->read_domains = I915_GEM_DOMAIN_GTT;
+		obj->write_domain = I915_GEM_DOMAIN_GTT;
 		obj->mm.dirty = true;
 	}
@@ -4146,7 +4146,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
 	return vma;
@@ -4199,15 +4199,15 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 
 	/* Flush the CPU cache if it's still invalid. */
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
 		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
-		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+		obj->read_domains |= I915_GEM_DOMAIN_CPU;
 	}
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+	GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
 
 	/* If we're writing through the CPU, then the GPU read domains will
 	 * need to be invalidated at next use.
@@ -4643,8 +4643,8 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 	i915_gem_object_init(obj, &i915_gem_object_ops);
 
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
 
 	if (HAS_LLC(dev_priv))
 		/* On some devices, we can have the GPU use the LLC (the CPU
@@ -5702,7 +5702,7 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 	if (IS_ERR(obj))
 		return obj;
 
-	GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
 
 	file = obj->base.filp;
 	offset = 0;
...
@@ -177,7 +177,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	} else if (obj->mm.pages) {
 		__i915_do_clflush(obj);
 	} else {
-		GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
 	}
 
 	obj->cache_dirty = false;
...
@@ -330,8 +330,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	 * write-combined buffer or a delay through the chipset for GTT
 	 * writes that do require us to treat GTT as a separate cache domain.)
 	 */
-	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
-	obj->base.write_domain = 0;
+	obj->read_domains = I915_GEM_DOMAIN_GTT;
+	obj->write_domain = 0;
 
 	return &obj->base;
...
@@ -1073,7 +1073,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	u32 *cmd;
 	int err;
 
-	GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);
+	GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
 
 	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
 	if (IS_ERR(obj))
@@ -1861,16 +1861,16 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	i915_gem_active_set(&vma->last_read[idx], req);
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 
-	obj->base.write_domain = 0;
+	obj->write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
-		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
+		obj->write_domain = I915_GEM_DOMAIN_RENDER;
 
 		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
 			i915_gem_active_set(&obj->frontbuffer_write, req);
 
-		obj->base.read_domains = 0;
+		obj->read_domains = 0;
 	}
-	obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
+	obj->read_domains |= I915_GEM_GPU_DOMAINS;
 
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		i915_gem_active_set(&vma->last_fence, req);
...
@@ -201,8 +201,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &i915_gem_object_internal_ops);
 
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
 
 	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
...
@@ -148,6 +148,21 @@ struct drm_i915_gem_object {
 #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
 	unsigned int cache_dirty:1;
 
+	/**
+	 * @read_domains: Read memory domains.
+	 *
+	 * These monitor which caches contain read/write data related to the
+	 * object. When transitioning from one set of domains to another,
+	 * the driver is called to ensure that caches are suitably flushed and
+	 * invalidated.
+	 */
+	u16 read_domains;
+
+	/**
+	 * @write_domain: Corresponding unique write memory domain.
+	 */
+	u16 write_domain;
+
 	atomic_t frontbuffer_bits;
 	unsigned int frontbuffer_ggtt_origin; /* write once */
 	struct i915_gem_active frontbuffer_write;
...
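(Editor's note, not part of the patch: u16 is ample here because the
domain values are single-bit flags that fit in the low byte. Quoted from
memory, so treat the exact values as an assumption; the authoritative
source is include/uapi/drm/i915_drm.h, with the write-combined domain
being a driver-internal addition in i915_drv.h.)

	#define I915_GEM_DOMAIN_CPU		0x00000001	/* CPU cache */
	#define I915_GEM_DOMAIN_RENDER		0x00000002	/* render cache */
	#define I915_GEM_DOMAIN_SAMPLER		0x00000004	/* sampler cache */
	#define I915_GEM_DOMAIN_COMMAND		0x00000008	/* command queue */
	#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010	/* instruction cache */
	#define I915_GEM_DOMAIN_VERTEX		0x00000020	/* vertex address cache */
	#define I915_GEM_DOMAIN_GTT		0x00000040	/* GTT domain */

	/* driver-internal write-combined domain (i915_drv.h) */
	#define I915_GEM_DOMAIN_WC		0x80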
@@ -516,7 +516,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
 	obj->stolen = stolen;
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 
 	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
...
@@ -798,8 +798,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 	drm_gem_private_object_init(dev, &obj->base, args->user_size);
 	i915_gem_object_init(obj, &i915_gem_userptr_ops);
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
 	obj->userptr.ptr = args->user_ptr;
...
@@ -1021,8 +1021,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->engine = __active_get_engine_id(&obj->frontbuffer_write);
 
 	err->gtt_offset = vma->node.start;
-	err->read_domains = obj->base.read_domains;
-	err->write_domain = obj->base.write_domain;
+	err->read_domains = obj->read_domains;
+	err->write_domain = obj->write_domain;
 	err->fence_reg = vma->fence ? vma->fence->id : -1;
 	err->tiling = i915_gem_object_get_tiling(obj);
 	err->dirty = obj->mm.dirty;
...
@@ -129,8 +129,8 @@ huge_gem_object(struct drm_i915_private *i915,
 	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
 	i915_gem_object_init(obj, &huge_ops);
 
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 	obj->scratch = phys_size;
...
@@ -178,8 +178,8 @@ huge_pages_object(struct drm_i915_private *i915,
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &huge_page_ops);
 
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;
 
 	obj->mm.page_mask = page_mask;
@@ -329,8 +329,8 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 	else
 		i915_gem_object_init(obj, &fake_ops);
 
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;
 
 	return obj;
...
@@ -215,8 +215,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	}
 
 	i915_gem_obj_finish_shmem_access(obj);
-	obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
-	obj->base.write_domain = 0;
+	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+	obj->write_domain = 0;
 	return 0;
 }
...
@@ -113,8 +113,8 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &fake_ops);
 
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;
 
 	/* Preallocate the "backing storage" */
...
@@ -115,21 +115,6 @@ struct drm_gem_object {
 	 */
 	int name;
 
-	/**
-	 * @read_domains:
-	 *
-	 * Read memory domains. These monitor which caches contain read/write data
-	 * related to the object. When transitioning from one set of domains
-	 * to another, the driver is called to ensure that caches are suitably
-	 * flushed and invalidated.
-	 */
-	uint32_t read_domains;
-
-	/**
-	 * @write_domain: Corresponding unique write memory domain.
-	 */
-	uint32_t write_domain;
-
 	/**
 	 * @dma_buf:
 	 *
...