Commit 4dd2fbbf authored by Chris Wilson

drm/i915: Make i915_vma.flags atomic_t for mutex reduction

In preparation for reducing the struct_mutex stranglehold around the vm,
make the vma.flags atomic so that we can acquire a pin on the vma
atomically before deciding if we need to take the mutex.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190911090243.16786-1-chris@chris-wilson.co.uk
parent 0606259e
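Editorial note, not part of the patch: the scheme below packs the pin count into the low byte of vma->flags (I915_VMA_PIN_MASK), with the overflow, bind and status bits above it, so a pin can be taken with a single atomic increment and the bind state inspected from the returned value, without holding struct_mutex. A minimal sketch of that fast path, mirroring the i915_vma_pin() hunk further down (example_try_pin and its -EAGAIN return are invented for this sketch; it also assumes the caller's bind-request bits line up with I915_VMA_GLOBAL_BIND/I915_VMA_LOCAL_BIND, as i915_vma_pin() relies on):

	/* Illustrative sketch of the lockless pin fast path. */
	static int example_try_pin(struct i915_vma *vma, u64 flags)
	{
		/*
		 * One atomic increment takes a pin (low byte) and returns
		 * the whole word, including the bind/status bits.
		 */
		if (((atomic_inc_return(&vma->flags) ^ flags) &
		     I915_VMA_BIND_MASK) == 0)
			return 0; /* already bound as requested: no mutex */

		/*
		 * Not yet bound (or the pin count overflowed into
		 * I915_VMA_PIN_OVERFLOW): drop the speculative pin and
		 * let the caller fall back to the mutex-protected
		 * insert-and-bind slow path (__i915_vma_do_pin()).
		 */
		atomic_dec(&vma->flags);
		return -EAGAIN; /* hypothetical return for this sketch */
	}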
@@ -163,7 +163,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
 			GEM_BUG_ON(i915_vma_is_active(vma));
-			vma->flags &= ~I915_VMA_PIN_MASK;
+			atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
 			i915_vma_destroy(vma);
 		}
 		GEM_BUG_ON(!list_empty(&obj->vma.list));
...
@@ -688,7 +688,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
 	vma->pages = obj->mm.pages;
-	vma->flags |= I915_VMA_GLOBAL_BIND;
+	set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
 	__i915_vma_set_map_and_fenceable(vma);
 
 	mutex_lock(&ggtt->vm.mutex);
...
@@ -155,7 +155,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 	u32 pte_flags;
 	int err;
 
-	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+	if (!i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND)) {
 		err = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start, vma->size);
 		if (err)
@@ -1877,7 +1877,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 	vma->size = size;
 	vma->fence_size = size;
-	vma->flags = I915_VMA_GGTT;
+	atomic_set(&vma->flags, I915_VMA_GGTT);
 	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
 
 	INIT_LIST_HEAD(&vma->obj_link);
@@ -2440,7 +2440,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
 	 * upgrade to both bound if we bind either to avoid double-binding.
 	 */
-	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
 
 	return 0;
 }
@@ -2470,7 +2470,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
 
-		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+		if (!i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND)) {
 			ret = alias->vm.allocate_va_range(&alias->vm,
 							  vma->node.start,
 							  vma->size);
@@ -2498,7 +2498,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
 
-	if (vma->flags & I915_VMA_GLOBAL_BIND) {
+	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
 		struct i915_address_space *vm = vma->vm;
 		intel_wakeref_t wakeref;
@@ -2506,7 +2506,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 			vm->clear_range(vm, vma->node.start, vma->size);
 	}
 
-	if (vma->flags & I915_VMA_LOCAL_BIND) {
+	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND)) {
 		struct i915_address_space *vm =
 			&i915_vm_to_ggtt(vma->vm)->alias->vm;
@@ -3308,7 +3308,7 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
 			continue;
 
 		mutex_unlock(&ggtt->vm.mutex);
...
@@ -171,7 +171,7 @@ vma_create(struct drm_i915_gem_object *obj,
 						i915_gem_object_get_stride(obj));
 		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
 
-		vma->flags |= I915_VMA_GGTT;
+		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
 	}
 
 	spin_lock(&obj->vma.lock);
@@ -325,7 +325,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (flags & PIN_USER)
 		bind_flags |= I915_VMA_LOCAL_BIND;
 
-	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+	vma_flags = atomic_read(&vma->flags);
+	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 	if (flags & PIN_UPDATE)
 		bind_flags |= vma_flags;
 	else
@@ -340,7 +341,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (ret)
 		return ret;
 
-	vma->flags |= bind_flags;
+	atomic_or(bind_flags, &vma->flags);
 
 	return 0;
 }
@@ -359,7 +360,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	}
 
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
 
 	ptr = vma->iomap;
 	if (ptr == NULL) {
@@ -472,9 +473,9 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
 
 	if (mappable && fenceable)
-		vma->flags |= I915_VMA_CAN_FENCE;
+		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
 	else
-		vma->flags &= ~I915_VMA_CAN_FENCE;
+		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
 }
 
 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
@@ -544,7 +545,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	int ret;
 
 	GEM_BUG_ON(i915_vma_is_closed(vma));
-	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
 	size = max(size, vma->size);
@@ -678,7 +679,7 @@ static void
 i915_vma_remove(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
 	vma->ops->clear_pages(vma);
@@ -709,7 +710,7 @@ i915_vma_remove(struct i915_vma *vma)
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags)
 {
-	const unsigned int bound = vma->flags;
+	const unsigned int bound = atomic_read(&vma->flags);
 	int ret;
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -732,9 +733,9 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	if (ret)
 		goto err_remove;
 
-	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_BIND_MASK));
 
-	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+	if ((bound ^ atomic_read(&vma->flags)) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
@@ -744,7 +745,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		i915_vma_remove(vma);
 		GEM_BUG_ON(vma->pages);
-		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
+		GEM_BUG_ON(atomic_read(&vma->flags) & I915_VMA_BIND_MASK);
 	}
 err_unpin:
 	__i915_vma_unpin(vma);
@@ -991,7 +992,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		mutex_unlock(&vma->vm->mutex);
 
 		__i915_vma_iounmap(vma);
-		vma->flags &= ~I915_VMA_CAN_FENCE;
+		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
 	}
 	GEM_BUG_ON(vma->fence);
 	GEM_BUG_ON(i915_vma_has_userfault(vma));
@@ -1000,7 +1001,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		trace_i915_vma_unbind(vma);
 		vma->ops->unbind_vma(vma);
 	}
-	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+	atomic_and(~I915_VMA_BIND_MASK, &vma->flags);
 
 	i915_vma_remove(vma);
...
@@ -72,7 +72,7 @@ struct i915_vma {
 	 * that exist in the ctx->handle_vmas LUT for this vma.
 	 */
 	atomic_t open_count;
-	unsigned long flags;
+	atomic_t flags;
 	/**
 	 * How many users have pinned this object in GTT space.
 	 *
@@ -97,18 +97,29 @@ struct i915_vma {
 	 * users.
 	 */
 #define I915_VMA_PIN_MASK 0xff
-#define I915_VMA_PIN_OVERFLOW	BIT(8)
+#define I915_VMA_PIN_OVERFLOW_BIT 8
+#define I915_VMA_PIN_OVERFLOW ((int)BIT(I915_VMA_PIN_OVERFLOW_BIT))
 
 /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND	BIT(9)
-#define I915_VMA_LOCAL_BIND	BIT(10)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_GLOBAL_BIND_BIT 9
+#define I915_VMA_LOCAL_BIND_BIT 10
 
-#define I915_VMA_GGTT		BIT(11)
-#define I915_VMA_CAN_FENCE	BIT(12)
+#define I915_VMA_GLOBAL_BIND	((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
+#define I915_VMA_LOCAL_BIND	((int)BIT(I915_VMA_LOCAL_BIND_BIT))
+
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | \
+			    I915_VMA_LOCAL_BIND | \
+			    I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT_BIT	11
+#define I915_VMA_CAN_FENCE_BIT	12
 #define I915_VMA_USERFAULT_BIT	13
-#define I915_VMA_USERFAULT	BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE	BIT(14)
+#define I915_VMA_GGTT_WRITE_BIT	14
+
+#define I915_VMA_GGTT		((int)BIT(I915_VMA_GGTT_BIT))
+#define I915_VMA_CAN_FENCE	((int)BIT(I915_VMA_CAN_FENCE_BIT))
+#define I915_VMA_USERFAULT	((int)BIT(I915_VMA_USERFAULT_BIT))
+#define I915_VMA_GGTT_WRITE	((int)BIT(I915_VMA_GGTT_WRITE_BIT))
 
 	struct i915_active active;
@@ -162,48 +173,51 @@ int __must_check i915_vma_move_to_active(struct i915_vma *vma,
 					 struct i915_request *rq,
 					 unsigned int flags);
 
+#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
+
 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_GGTT;
+	return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
 }
 
 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_GGTT_WRITE;
+	return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
 }
 
 static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-	vma->flags |= I915_VMA_GGTT_WRITE;
+	set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
 }
 
-static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
 {
-	vma->flags &= ~I915_VMA_GGTT_WRITE;
+	return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
+				  __i915_vma_flags(vma));
 }
 
 void i915_vma_flush_writes(struct i915_vma *vma);
 
 static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_CAN_FENCE;
+	return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
 
 static inline bool i915_vma_set_userfault(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-	return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
 }
 
 static inline void i915_vma_unset_userfault(struct i915_vma *vma)
 {
-	return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
 }
 
 static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
 {
-	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
 }
 
 static inline bool i915_vma_is_closed(const struct i915_vma *vma)
@@ -330,7 +344,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	/* Pin early to prevent the shrinker/eviction logic from destroying
 	 * our vma as we insert and bind.
 	 */
-	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
+	if (likely(((atomic_inc_return(&vma->flags) ^ flags) & I915_VMA_BIND_MASK) == 0)) {
 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 		return 0;
@@ -341,7 +355,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_PIN_MASK;
+	return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
 }
 
 static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -351,13 +365,13 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
-	vma->flags++;
-	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+	atomic_inc(&vma->flags);
+	GEM_BUG_ON(atomic_read(&vma->flags) & I915_VMA_PIN_OVERFLOW);
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
-	vma->flags--;
+	atomic_dec(&vma->flags);
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
@@ -370,7 +384,7 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
 static inline bool i915_vma_is_bound(const struct i915_vma *vma,
 				     unsigned int where)
 {
-	return vma->flags & where;
+	return atomic_read(&vma->flags) & where;
 }
 
 static inline bool i915_node_color_differs(const struct drm_mm_node *node,
...
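A side note on the __i915_vma_flags() helper added above (an editorial aside, not part of the patch): the kernel's bitops (set_bit(), test_bit(), test_and_clear_bit(), ...) operate on unsigned long words, while the arithmetic atomics (atomic_inc(), atomic_or(), ...) take an atomic_t, so the macro casts the address of the atomic_t's counter to let both families target the same word. A rough userspace analogue of keeping a packed counter and flag bits in one atomic word, with C11 atomics standing in for the kernel primitives (all names here are invented for the sketch):

	/* Userspace analogue only; compile with any C11 compiler. */
	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define EX_PIN_MASK	0xff		/* pin count: low byte */
	#define EX_GLOBAL_BIND	(1 << 9)	/* flag bits live above it */

	int main(void)
	{
		atomic_int flags = 0;

		atomic_fetch_add(&flags, 1);		   /* ~__i915_vma_pin() */
		atomic_fetch_or(&flags, EX_GLOBAL_BIND);   /* ~set_bit() */

		int v = atomic_load(&flags);
		assert((v & EX_PIN_MASK) == 1);		   /* one pin held */
		assert(v & EX_GLOBAL_BIND);		   /* and bound */

		atomic_fetch_and(&flags, ~EX_GLOBAL_BIND); /* ~clear_bit() */
		atomic_fetch_sub(&flags, 1);		   /* ~__i915_vma_unpin() */

		printf("flags = %#x\n", atomic_load(&flags)); /* -> 0 */
		return 0;
	}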
@@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
 			   u32 flags)
 {
 	GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
-	vma->flags |= I915_VMA_LOCAL_BIND;
+	set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));
 	return 0;
 }
@@ -86,7 +86,7 @@ static int mock_bind_ggtt(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
 			  u32 flags)
 {
-	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
 	return 0;
 }
...