Commit b352ba54 authored by Rob Clark

drm/msm/gem: Convert to using drm_gem_lru

This converts over to use the shared GEM LRU/shrinker helpers.  Note
that it means we are no longer tracking purgeable or willneed buffers
that are active separately.  But the most recently pinned buffers should
be at the tail of the various LRUs, and the shrinker is already prepared
to encounter objects which are still active.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/496131/
Link: https://lore.kernel.org/r/20220802155152.1727594-11-robdclark@gmail.com
parent e7c2af13
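
For orientation before the diff: the shared helpers this patch adopts were added earlier in the same series (include/drm/drm_gem.h, drivers/gpu/drm/drm_gem.c). A minimal sketch of the API surface as used below, assuming the drm_gem_lru helper from that series; field layout and comments are paraphrased, not quoted:

/*
 * Sketch of the shared GEM LRU helpers this patch converts to.
 */
struct drm_gem_lru {
    struct mutex *lock;     /* driver-provided lock protecting the LRU */
    long count;             /* resident size of objects in the LRU, in pages */
    struct list_head list;
};

void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);

/* Move obj to the tail of @lru, dropping it from whichever LRU it was
 * previously on (LRU membership is tracked in the GEM object itself): */
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);

/* Walk @lru from the head, calling @shrink on each object whose resv lock
 * can be taken via trylock (so it is safe from reclaim context); returns
 * the number of pages freed: */
unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
                               bool (*shrink)(struct drm_gem_object *obj));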
drivers/gpu/drm/msm/msm_drv.c

@@ -418,14 +418,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 	INIT_LIST_HEAD(&priv->objects);
 	mutex_init(&priv->obj_lock);
 
-	INIT_LIST_HEAD(&priv->inactive_willneed);
-	INIT_LIST_HEAD(&priv->inactive_dontneed);
-	INIT_LIST_HEAD(&priv->inactive_unpinned);
-	mutex_init(&priv->mm_lock);
+	/*
+	 * Initialize the LRUs:
+	 */
+	mutex_init(&priv->lru.lock);
+	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
+	drm_gem_lru_init(&priv->lru.pinned,   &priv->lru.lock);
+	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
+	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
 
 	/* Teach lockdep about lock ordering wrt. shrinker: */
 	fs_reclaim_acquire(GFP_KERNEL);
-	might_lock(&priv->mm_lock);
+	might_lock(&priv->lru.lock);
 	fs_reclaim_release(GFP_KERNEL);
 
 	drm_mode_config_init(ddev);
drivers/gpu/drm/msm/msm_drv.h

@@ -142,28 +142,60 @@ struct msm_drm_private {
 	struct mutex obj_lock;
 
 	/**
-	 * LRUs of inactive GEM objects.  Every bo is either in one of the
-	 * inactive lists (depending on whether or not it is shrinkable) or
-	 * gpu->active_list (for the gpu it is active on[1]), or transiently
-	 * on a temporary list as the shrinker is running.
+	 * lru:
 	 *
-	 * Note that inactive_willneed also contains pinned and vmap'd bos,
-	 * but the number of pinned-but-not-active objects is small (scanout
-	 * buffers, ringbuffer, etc).
+	 * The various LRUs that a GEM object is in at various stages of
+	 * its lifetime.  Objects start out in the unbacked LRU.  When
+	 * pinned (for scanout or permanently mapped GPU buffers, like
+	 * ringbuffer, memptr, fw, etc) it moves to the pinned LRU.  When
+	 * unpinned, it moves into willneed or dontneed LRU depending on
+	 * madvise state.  When backing pages are evicted (willneed) or
+	 * purged (dontneed) it moves back into the unbacked LRU.
 	 *
-	 * These lists are protected by mm_lock (which should be acquired
-	 * before per GEM object lock).  One should *not* hold mm_lock in
-	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
-	 *
-	 * [1] if someone ever added support for the old 2d cores, there could be
-	 * more than one gpu object
+	 * The dontneed LRU is considered by the shrinker for objects
+	 * that are candidates for purging, and the willneed LRU is
+	 * considered for objects that could be evicted.
 	 */
-	struct list_head inactive_willneed;  /* inactive + potentially unpin/evictable */
-	struct list_head inactive_dontneed;  /* inactive + shrinkable */
-	struct list_head inactive_unpinned;  /* inactive + purged or unpinned */
-	long shrinkable_count;               /* write access under mm_lock */
-	long evictable_count;                /* write access under mm_lock */
-	struct mutex mm_lock;
+	struct {
+		/**
+		 * unbacked:
+		 *
+		 * The LRU for GEM objects without backing pages allocated.
+		 * This mostly exists so that objects are always in one
+		 * LRU.
+		 */
+		struct drm_gem_lru unbacked;
+
+		/**
+		 * pinned:
+		 *
+		 * The LRU for pinned GEM objects.
+		 */
+		struct drm_gem_lru pinned;
+
+		/**
+		 * willneed:
+		 *
+		 * The LRU for unpinned GEM objects which are in madvise
+		 * WILLNEED state (ie. can be evicted).
+		 */
+		struct drm_gem_lru willneed;
+
+		/**
+		 * dontneed:
+		 *
+		 * The LRU for unpinned GEM objects which are in madvise
+		 * DONTNEED state (ie. can be purged).
+		 */
+		struct drm_gem_lru dontneed;
+
+		/**
+		 * lock:
+		 *
+		 * Protects manipulation of all of the LRUs.
+		 */
+		struct mutex lock;
+	} lru;
 
 	struct workqueue_struct *wq;
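
The lifecycle described by the new kdoc comment, condensed into the state transitions (a summary of the text above, not code from the patch):

/*
 * Object LRU transitions, per the lru kdoc above:
 *
 *   unbacked --(get_pages + pin/vmap)--> pinned
 *   pinned   --(unpin/vunmap)----------> willneed or dontneed (by madvise)
 *   willneed --(evict: pages dropped)--> unbacked
 *   dontneed --(purge: pages dropped)--> unbacked
 */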
drivers/gpu/drm/msm/msm_gem.c

@@ -174,6 +174,7 @@ static void put_pages(struct drm_gem_object *obj)
 		put_pages_vram(obj);
 
 		msm_obj->pages = NULL;
+		update_lru(obj);
 	}
 }

@@ -210,8 +211,6 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
 
 void msm_gem_unpin_pages(struct drm_gem_object *obj)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
 	msm_gem_lock(obj);
 	msm_gem_unpin_locked(obj);
 	msm_gem_unlock(obj);

@@ -761,7 +760,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
 	put_iova_vmas(obj);
 
 	msm_obj->madv = __MSM_MADV_PURGED;
-	update_lru(obj);
 
 	drm_gem_free_mmap_offset(obj);

@@ -786,7 +784,6 @@ void msm_gem_evict(struct drm_gem_object *obj)
 	GEM_WARN_ON(!msm_gem_is_locked(obj));
 	GEM_WARN_ON(is_unevictable(msm_obj));
-	GEM_WARN_ON(!msm_obj->evictable);
 
 	/* Get rid of any iommu mapping(s): */
 	put_iova_spaces(obj, false);

@@ -794,8 +791,6 @@ void msm_gem_evict(struct drm_gem_object *obj)
 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 
 	put_pages(obj);
-
-	update_lru(obj);
 }
 
 void msm_gem_vunmap(struct drm_gem_object *obj)

@@ -818,26 +813,20 @@ static void update_lru(struct drm_gem_object *obj)
 	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
 
-	mutex_lock(&priv->mm_lock);
-
-	if (msm_obj->dontneed)
-		mark_unpurgeable(msm_obj);
-	if (msm_obj->evictable)
-		mark_unevictable(msm_obj);
-
-	list_del(&msm_obj->mm_list);
-	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
-		mark_evictable(msm_obj);
-	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
-		mark_purgeable(msm_obj);
+	if (!msm_obj->pages) {
+		GEM_WARN_ON(msm_obj->pin_count);
+		GEM_WARN_ON(msm_obj->vmap_count);
+
+		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
+	} else if (msm_obj->pin_count || msm_obj->vmap_count) {
+		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
+	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
 	} else {
-		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
-	}
-
-	mutex_unlock(&priv->mm_lock);
+		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
+	}
 }
 
 bool msm_gem_active(struct drm_gem_object *obj)

@@ -995,12 +984,6 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
 	list_del(&msm_obj->node);
 	mutex_unlock(&priv->obj_lock);
 
-	mutex_lock(&priv->mm_lock);
-	if (msm_obj->dontneed)
-		mark_unpurgeable(msm_obj);
-	list_del(&msm_obj->mm_list);
-	mutex_unlock(&priv->mm_lock);
-
 	put_iova_spaces(obj, true);
 
 	if (obj->import_attach) {

@@ -1160,13 +1143,6 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
 		to_msm_bo(obj)->vram_node = &vma->node;
 
-		/* Call chain get_pages() -> update_inactive() tries to
-		 * access msm_obj->mm_list, but it is not initialized yet.
-		 * To avoid NULL pointer dereference error, initialize
-		 * mm_list to be empty.
-		 */
-		INIT_LIST_HEAD(&msm_obj->mm_list);
-
 		msm_gem_lock(obj);
 		pages = get_pages(obj);
 		msm_gem_unlock(obj);

@@ -1189,9 +1165,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 	}
 
-	mutex_lock(&priv->mm_lock);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
-	mutex_unlock(&priv->mm_lock);
+	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
 
 	mutex_lock(&priv->obj_lock);
 	list_add_tail(&msm_obj->node, &priv->objects);

@@ -1247,9 +1221,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf)
 	msm_gem_unlock(obj);
 
-	mutex_lock(&priv->mm_lock);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
-	mutex_unlock(&priv->mm_lock);
+	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
 
 	mutex_lock(&priv->obj_lock);
 	list_add_tail(&msm_obj->node, &priv->objects);
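
Not visible in these hunks: the pin/unpin and madvise paths all funnel through update_lru() under the object (resv) lock, which is what keeps every object on exactly one LRU. Roughly, for the unpin side (a sketch of the surrounding driver code, not part of this diff):

/* Sketch (not in this diff): any change to a property that update_lru()
 * consults happens with the object lock held and ends in update_lru(). */
void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_lru(obj);	/* pin_count hit 0: move to willneed/dontneed */
}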
drivers/gpu/drm/msm/msm_gem.h

@@ -93,16 +93,6 @@ struct msm_gem_object {
 	 */
 	uint8_t madv;
 
-	/**
-	 * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
-	 */
-	bool dontneed : 1;
-
-	/**
-	 * Is object evictable (ie. counted in priv->evictable_count)?
-	 */
-	bool evictable : 1;
-
 	/**
 	 * count of active vmap'ing
 	 */

@@ -114,17 +104,6 @@ struct msm_gem_object {
 	 */
 	struct list_head node;
 
-	/**
-	 * An object is either:
-	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
-	 *    (depending on purgeability status)
-	 *  active   - on one one of the gpu's active_list..  well, at
-	 *    least for now we don't have (I don't think) hw sync between
-	 *    2d and 3d one devices which have both, meaning we need to
-	 *    block on submit if a bo is already on other ring
-	 */
-	struct list_head mm_list;
-
 	struct page **pages;
 	struct sg_table *sgt;
 	void *vaddr;

@@ -206,12 +185,6 @@ msm_gem_lock(struct drm_gem_object *obj)
 	dma_resv_lock(obj->resv, NULL);
 }
 
-static inline bool __must_check
-msm_gem_trylock(struct drm_gem_object *obj)
-{
-	return dma_resv_trylock(obj->resv);
-}
-
 static inline int
 msm_gem_lock_interruptible(struct drm_gem_object *obj)
 {

@@ -260,77 +233,11 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
 	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
 }
 
-static inline void mark_purgeable(struct msm_gem_object *msm_obj)
-{
-	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
-	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
-	if (is_unpurgeable(msm_obj))
-		return;
-
-	if (GEM_WARN_ON(msm_obj->dontneed))
-		return;
-
-	priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
-	msm_obj->dontneed = true;
-}
-
-static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
-{
-	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
-	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
-	if (is_unpurgeable(msm_obj))
-		return;
-
-	if (GEM_WARN_ON(!msm_obj->dontneed))
-		return;
-
-	priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
-	GEM_WARN_ON(priv->shrinkable_count < 0);
-	msm_obj->dontneed = false;
-}
-
 static inline bool is_unevictable(struct msm_gem_object *msm_obj)
 {
 	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
 }
 
-static inline void mark_evictable(struct msm_gem_object *msm_obj)
-{
-	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
-	WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
-	if (is_unevictable(msm_obj))
-		return;
-
-	if (WARN_ON(msm_obj->evictable))
-		return;
-
-	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
-	msm_obj->evictable = true;
-}
-
-static inline void mark_unevictable(struct msm_gem_object *msm_obj)
-{
-	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
-	WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
-	if (is_unevictable(msm_obj))
-		return;
-
-	if (WARN_ON(!msm_obj->evictable))
-		return;
-
-	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
-	WARN_ON(priv->evictable_count < 0);
-	msm_obj->evictable = false;
-}
-
 void msm_gem_purge(struct drm_gem_object *obj);
 void msm_gem_evict(struct drm_gem_object *obj);
 void msm_gem_vunmap(struct drm_gem_object *obj);
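
The mark_purgeable()/mark_evictable() family removed here existed only to keep priv->shrinkable_count and priv->evictable_count in sync with list membership. The shared helper maintains an equivalent per-LRU page count internally, roughly like this (a sketch of the helper-side bookkeeping, assuming the drm_gem.c implementation from the same series):

/* Sketch of the bookkeeping inside the shared helper: moving an object
 * between LRUs adjusts each LRU's page count under lru->lock, which is
 * what makes the driver-side counters above redundant. */
static void drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	lru->count -= obj->size >> PAGE_SHIFT;
	list_del(&obj->lru_node);
	obj->lru = NULL;
}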
drivers/gpu/drm/msm/msm_gem_shrinker.c

@@ -29,121 +29,61 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
-	unsigned count = priv->shrinkable_count;
+	unsigned count = priv->lru.dontneed.count;
 
 	if (can_swap())
-		count += priv->evictable_count;
+		count += priv->lru.willneed.count;
 
 	return count;
 }
 
 static bool
-purge(struct msm_gem_object *msm_obj)
+purge(struct drm_gem_object *obj)
 {
-	if (!is_purgeable(msm_obj))
+	if (!is_purgeable(to_msm_bo(obj)))
 		return false;
 
-	if (msm_gem_active(&msm_obj->base))
+	if (msm_gem_active(obj))
 		return false;
 
-	/*
-	 * This will move the obj out of still_in_list to
-	 * the purged list
-	 */
-	msm_gem_purge(&msm_obj->base);
+	msm_gem_purge(obj);
 
 	return true;
 }
 
 static bool
-evict(struct msm_gem_object *msm_obj)
+evict(struct drm_gem_object *obj)
 {
-	if (is_unevictable(msm_obj))
+	if (is_unevictable(to_msm_bo(obj)))
 		return false;
 
-	if (msm_gem_active(&msm_obj->base))
+	if (msm_gem_active(obj))
 		return false;
 
-	msm_gem_evict(&msm_obj->base);
+	msm_gem_evict(obj);
 
 	return true;
 }
 
-static unsigned long
-scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
-		bool (*shrink)(struct msm_gem_object *msm_obj))
-{
-	unsigned freed = 0;
-	struct list_head still_in_list;
-
-	INIT_LIST_HEAD(&still_in_list);
-
-	mutex_lock(&priv->mm_lock);
-
-	while (freed < nr_to_scan) {
-		struct msm_gem_object *msm_obj = list_first_entry_or_null(
-				list, typeof(*msm_obj), mm_list);
-
-		if (!msm_obj)
-			break;
-
-		list_move_tail(&msm_obj->mm_list, &still_in_list);
-
-		/*
-		 * If it is in the process of being freed, msm_gem_free_object
-		 * can be blocked on mm_lock waiting to remove it.  So just
-		 * skip it.
-		 */
-		if (!kref_get_unless_zero(&msm_obj->base.refcount))
-			continue;
-
-		/*
-		 * Now that we own a reference, we can drop mm_lock for the
-		 * rest of the loop body, to reduce contention with the
-		 * retire_submit path (which could make more objects purgeable)
-		 */
-		mutex_unlock(&priv->mm_lock);
-
-		/*
-		 * Note that this still needs to be trylock, since we can
-		 * hit shrinker in response to trying to get backing pages
-		 * for this obj (ie. while it's lock is already held)
-		 */
-		if (!msm_gem_trylock(&msm_obj->base))
-			goto tail;
-
-		if (shrink(msm_obj))
-			freed += msm_obj->base.size >> PAGE_SHIFT;
-
-		msm_gem_unlock(&msm_obj->base);
-
-tail:
-		drm_gem_object_put(&msm_obj->base);
-		mutex_lock(&priv->mm_lock);
-	}
-
-	list_splice_tail(&still_in_list, list);
-	mutex_unlock(&priv->mm_lock);
-
-	return freed;
-}
-
 static unsigned long
 msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
+	long nr = sc->nr_to_scan;
 	unsigned long freed;
 
-	freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
+	freed = drm_gem_lru_scan(&priv->lru.dontneed, nr, purge);
+
+	nr -= freed;
 
 	if (freed > 0)
 		trace_msm_gem_purge(freed << PAGE_SHIFT);
 
-	if (can_swap() && freed < sc->nr_to_scan) {
-		int evicted = scan(priv, sc->nr_to_scan - freed,
-				&priv->inactive_willneed, evict);
+	if (can_swap() && nr > 0) {
+		unsigned long evicted;
+
+		evicted = drm_gem_lru_scan(&priv->lru.willneed, nr, evict);
+
+		nr -= evicted;
 
 		if (evicted > 0)
 			trace_msm_gem_evict(evicted << PAGE_SHIFT);

@@ -179,12 +119,12 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
 static const int vmap_shrink_limit = 15;
 
 static bool
-vmap_shrink(struct msm_gem_object *msm_obj)
+vmap_shrink(struct drm_gem_object *obj)
 {
-	if (!is_vunmapable(msm_obj))
+	if (!is_vunmapable(to_msm_bo(obj)))
 		return false;
 
-	msm_gem_vunmap(&msm_obj->base);
+	msm_gem_vunmap(obj);
 
 	return true;
 }

@@ -194,17 +134,18 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 {
 	struct msm_drm_private *priv =
 		container_of(nb, struct msm_drm_private, vmap_notifier);
-	struct list_head *mm_lists[] = {
-		&priv->inactive_dontneed,
-		&priv->inactive_willneed,
-		priv->gpu ? &priv->gpu->active_list : NULL,
+	struct drm_gem_lru *lrus[] = {
+		&priv->lru.dontneed,
+		&priv->lru.willneed,
+		&priv->lru.pinned,
 		NULL,
 	};
 	unsigned idx, unmapped = 0;
 
-	for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
-		unmapped += scan(priv, vmap_shrink_limit - unmapped,
-				mm_lists[idx], vmap_shrink);
+	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+		unmapped += drm_gem_lru_scan(lrus[idx],
+				vmap_shrink_limit - unmapped,
+				vmap_shrink);
 	}
 
 	*(unsigned long *)ptr += unmapped;
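
The open-coded scan() removed above duplicated logic that now lives in drm_gem_lru_scan(): the trylock of the object lock (the shrinker can recurse from get_pages()), the kref dance against concurrent frees, and the still_in_list bookkeeping. The resulting shrinker flow condenses to (a restatement of the code above, not new driver code):

/* Condensed restatement of msm_gem_shrinker_scan(): purge dontneed
 * objects first, then (if swap is available) evict willneed objects to
 * cover the remainder of the scan target. */
static unsigned long shrinker_scan_flow(struct msm_drm_private *priv,
					unsigned long nr)
{
	unsigned long freed = drm_gem_lru_scan(&priv->lru.dontneed, nr, purge);

	if (can_swap() && freed < nr)
		freed += drm_gem_lru_scan(&priv->lru.willneed, nr - freed, evict);

	return freed;
}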
drivers/gpu/drm/msm/msm_gpu.c

@@ -846,7 +846,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	sched_set_fifo_low(gpu->worker->task);
 
-	INIT_LIST_HEAD(&gpu->active_list);
 	mutex_init(&gpu->active_lock);
 	mutex_init(&gpu->lock);
 	init_waitqueue_head(&gpu->retire_event);

@@ -974,8 +973,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 	DBG("%s", gpu->name);
 
-	WARN_ON(!list_empty(&gpu->active_list));
-
 	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
 		msm_ringbuffer_destroy(gpu->rb[i]);
 		gpu->rb[i] = NULL;
drivers/gpu/drm/msm/msm_gpu.h

@@ -187,12 +187,6 @@ struct msm_gpu {
 	 */
 	int cur_ctx_seqno;
 
-	/*
-	 * List of GEM active objects on this gpu.  Protected by
-	 * msm_drm_private::mm_lock
-	 */
-	struct list_head active_list;
-
 	/**
 	 * lock:
 	 *