Commit 71724f70 authored by Chris Wilson

drm/mm: Use helpers for drm_mm_node booleans

In preparation for rearranging the booleans into a flags field, ensure
all the current users are using the inline helpers and not directly
accessing the members.
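
For reference, the helper pattern being adopted looks roughly like this
(a sketch of the pre-series definition in include/drm/drm_mm.h; the
equivalent drm_mm_node_scanned_block() helper is introduced by the hunk
below):

    /* Sketch: pre-series helper, still backed by a plain boolean here. */
    static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
    {
            return node->allocated;
    }

With callers testing drm_mm_node_allocated(node) rather than reading
node->allocated directly, the backing storage can later be folded into a
flags field without touching every user again.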
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003210100.22250-3-chris@chris-wilson.co.uk
parent 9c98f021
@@ -174,7 +174,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 	node->__subtree_last = LAST(node);
-	if (hole_node->allocated) {
+	if (drm_mm_node_allocated(hole_node)) {
 		rb = &hole_node->rb;
 		while (rb) {
 			parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -561,6 +561,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+{
+	return node->scanned_block;
+}
+
 /**
  * drm_mm_remove_node - Remove a memory node from the allocator.
  * @node: drm_mm_node to remove
@@ -574,8 +579,8 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
-	DRM_MM_BUG_ON(!node->allocated);
-	DRM_MM_BUG_ON(node->scanned_block);
+	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
 	prev_node = list_prev_entry(node, node_list);
@@ -605,7 +610,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
 	struct drm_mm *mm = old->mm;
-	DRM_MM_BUG_ON(!old->allocated);
+	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
 	*new = *old;
@@ -731,8 +736,8 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 	u64 adj_start, adj_end;
 	DRM_MM_BUG_ON(node->mm != mm);
-	DRM_MM_BUG_ON(!node->allocated);
-	DRM_MM_BUG_ON(node->scanned_block);
+	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
 	node->scanned_block = true;
 	mm->scan_active++;
@@ -818,7 +823,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
 	struct drm_mm_node *prev_node;
 	DRM_MM_BUG_ON(node->mm != scan->mm);
-	DRM_MM_BUG_ON(!node->scanned_block);
+	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
 	node->scanned_block = false;
 	DRM_MM_BUG_ON(!node->mm->scan_active);
...
@@ -963,7 +963,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		if (cache->node.allocated) {
+		if (drm_mm_node_allocated(&cache->node)) {
 			ggtt->vm.clear_range(&ggtt->vm,
 					     cache->node.start,
 					     cache->node.size);
@@ -1056,7 +1056,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		}
 		offset = cache->node.start;
-		if (cache->node.allocated) {
+		if (drm_mm_node_allocated(&cache->node)) {
 			ggtt->vm.insert_page(&ggtt->vm,
 					     i915_gem_object_get_dma_address(obj, page),
 					     offset, I915_CACHE_NONE, 0);
...
@@ -400,7 +400,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
 {
 	struct drm_mm_node *node = &ggtt->uc_fw;
-	GEM_BUG_ON(!node->allocated);
+	GEM_BUG_ON(!drm_mm_node_allocated(node));
 	GEM_BUG_ON(upper_32_bits(node->start));
 	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
...
@@ -356,7 +356,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
 			goto out_unlock;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
 	}
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -393,7 +393,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		unsigned page_offset = offset_in_page(offset);
 		unsigned page_length = PAGE_SIZE - page_offset;
 		page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
 			ggtt->vm.insert_page(&ggtt->vm,
 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
 					     node.start, I915_CACHE_NONE, 0);
@@ -415,7 +415,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
 	mutex_lock(&i915->drm.struct_mutex);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
@@ -566,7 +566,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
 			goto out_rpm;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
 	}
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		unsigned int page_offset = offset_in_page(offset);
 		unsigned int page_length = PAGE_SIZE - page_offset;
 		page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
 			/* flush the write before we modify the GGTT */
 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 			ggtt->vm.insert_page(&ggtt->vm,
@@ -636,7 +636,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
...
@@ -301,7 +301,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 			break;
 		}
-		GEM_BUG_ON(!node->allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(node));
 		vma = container_of(node, typeof(*vma), node);
 		/* If we are using coloring to insert guard pages between
...
@@ -2560,7 +2560,7 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
 				  u64 *start,
 				  u64 *end)
 {
-	if (node->allocated && node->color != color)
+	if (drm_mm_node_allocated(node) && node->color != color)
 		*start += I915_GTT_PAGE_SIZE;
 	/* Also leave a space between the unallocated reserved node after the
...
@@ -479,7 +479,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 static bool color_differs(struct drm_mm_node *node, unsigned long color)
 {
-	return node->allocated && node->color != color;
+	return drm_mm_node_allocated(node) && node->color != color;
 }
 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
@@ -797,7 +797,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
-	GEM_BUG_ON(vma->node.allocated);
+	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->fence);
 	mutex_lock(&vma->vm->mutex);
...
@@ -214,7 +214,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-	GEM_BUG_ON(!vma->node.allocated);
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(upper_32_bits(vma->node.start));
 	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
 	return lower_32_bits(vma->node.start);
...
@@ -854,7 +854,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
 	if (start > 0) {
 		node = __drm_mm_interval_first(mm, 0, start - 1);
-		if (node->allocated) {
+		if (drm_mm_node_allocated(node)) {
 			pr_err("node before start: node=%llx+%llu, start=%llx\n",
 			       node->start, node->size, start);
 			return false;
@@ -863,7 +863,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
 	if (end < U64_MAX) {
 		node = __drm_mm_interval_first(mm, end, U64_MAX);
-		if (node->allocated) {
+		if (drm_mm_node_allocated(node)) {
 			pr_err("node after end: node=%llx+%llu, end=%llx\n",
 			       node->start, node->size, end);
 			return false;
@@ -1156,12 +1156,12 @@ static void show_holes(const struct drm_mm *mm, int count)
 		struct drm_mm_node *next = list_next_entry(hole, node_list);
 		const char *node1 = NULL, *node2 = NULL;
-		if (hole->allocated)
+		if (drm_mm_node_allocated(hole))
 			node1 = kasprintf(GFP_KERNEL,
 					  "[%llx + %lld, color=%ld], ",
 					  hole->start, hole->size, hole->color);
-		if (next->allocated)
+		if (drm_mm_node_allocated(next))
 			node2 = kasprintf(GFP_KERNEL,
 					  ", [%llx + %lld, color=%ld]",
 					  next->start, next->size, next->color);
@@ -1900,18 +1900,18 @@ static void separate_adjacent_colors(const struct drm_mm_node *node,
 				     u64 *start,
 				     u64 *end)
 {
-	if (node->allocated && node->color != color)
+	if (drm_mm_node_allocated(node) && node->color != color)
 		++*start;
 	node = list_next_entry(node, node_list);
-	if (node->allocated && node->color != color)
+	if (drm_mm_node_allocated(node) && node->color != color)
 		--*end;
 }
 static bool colors_abutt(const struct drm_mm_node *node)
 {
 	if (!drm_mm_hole_follows(node) &&
-	    list_next_entry(node, node_list)->allocated) {
+	    drm_mm_node_allocated(list_next_entry(node, node_list))) {
 		pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
 		       node->color, node->start, node->size,
 		       list_next_entry(node, node_list)->color,
...
@@ -994,7 +994,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
 	struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
-	if (vc4_state->mm.allocated) {
+	if (drm_mm_node_allocated(&vc4_state->mm)) {
 		unsigned long flags;
 		spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
...
@@ -315,7 +315,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct vc4_dev *vc4 = drm->dev_private;
-	if (vc4->hvs->mitchell_netravali_filter.allocated)
+	if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
 		drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
 	drm_mm_takedown(&vc4->hvs->dlist_mm);
...
@@ -178,7 +178,7 @@ static void vc4_plane_destroy_state(struct drm_plane *plane,
 	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
-	if (vc4_state->lbm.allocated) {
+	if (drm_mm_node_allocated(&vc4_state->lbm)) {
 		unsigned long irqflags;
 		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
@@ -557,7 +557,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
 	/* Allocate the LBM memory that the HVS will use for temporary
 	 * storage due to our scaling/format conversion.
 	 */
-	if (!vc4_state->lbm.allocated) {
+	if (!drm_mm_node_allocated(&vc4_state->lbm)) {
 		int ret;
 		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
...