Commit d4d7d363 authored by Rob Clark's avatar Rob Clark

drm/msm/gem: Add msm_gem_assert_locked()

All use of msm_gem_is_locked() is just for WARN_ON()s, so extract out
into an msm_gem_assert_locked() helper.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/496136/
Link: https://lore.kernel.org/r/20220802155152.1727594-15-robdclark@gmail.com
parent 025d2723
...@@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj) ...@@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
if (!msm_obj->pages) { if (!msm_obj->pages) {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
...@@ -183,7 +183,7 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) ...@@ -183,7 +183,7 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p; struct page **p;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
return ERR_PTR(-EBUSY); return ERR_PTR(-EBUSY);
...@@ -278,7 +278,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj) ...@@ -278,7 +278,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
int ret; int ret;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
/* Make it mmapable */ /* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj); ret = drm_gem_create_mmap_offset(obj);
...@@ -307,7 +307,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, ...@@ -307,7 +307,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
vma = kzalloc(sizeof(*vma), GFP_KERNEL); vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma) if (!vma)
...@@ -326,7 +326,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, ...@@ -326,7 +326,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) { list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace) if (vma->aspace == aspace)
...@@ -357,7 +357,7 @@ put_iova_spaces(struct drm_gem_object *obj, bool close) ...@@ -357,7 +357,7 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) { list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) { if (vma->aspace) {
...@@ -375,7 +375,7 @@ put_iova_vmas(struct drm_gem_object *obj) ...@@ -375,7 +375,7 @@ put_iova_vmas(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp; struct msm_gem_vma *vma, *tmp;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma); del_vma(vma);
...@@ -388,7 +388,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, ...@@ -388,7 +388,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
{ {
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
vma = lookup_vma(obj, aspace); vma = lookup_vma(obj, aspace);
...@@ -428,7 +428,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) ...@@ -428,7 +428,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT) if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE; prot |= IOMMU_CACHE;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY; return -EBUSY;
...@@ -448,7 +448,7 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj) ...@@ -448,7 +448,7 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
msm_obj->pin_count--; msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0); GEM_WARN_ON(msm_obj->pin_count < 0);
...@@ -469,7 +469,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, ...@@ -469,7 +469,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_vma *vma; struct msm_gem_vma *vma;
int ret; int ret;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
vma = get_vma_locked(obj, aspace, range_start, range_end); vma = get_vma_locked(obj, aspace, range_start, range_end);
if (IS_ERR(vma)) if (IS_ERR(vma))
...@@ -630,7 +630,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) ...@@ -630,7 +630,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0; int ret = 0;
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
if (obj->import_attach) if (obj->import_attach)
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
...@@ -703,7 +703,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) ...@@ -703,7 +703,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
GEM_WARN_ON(msm_obj->vmap_count < 1); GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--; msm_obj->vmap_count--;
...@@ -745,7 +745,7 @@ void msm_gem_purge(struct drm_gem_object *obj) ...@@ -745,7 +745,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj)); GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */ /* Get rid of any iommu mapping(s): */
...@@ -782,7 +782,7 @@ void msm_gem_evict(struct drm_gem_object *obj) ...@@ -782,7 +782,7 @@ void msm_gem_evict(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
GEM_WARN_ON(is_unevictable(msm_obj)); GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */ /* Get rid of any iommu mapping(s): */
...@@ -797,7 +797,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj) ...@@ -797,7 +797,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj))) if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return; return;
...@@ -811,7 +811,7 @@ static void update_lru(struct drm_gem_object *obj) ...@@ -811,7 +811,7 @@ static void update_lru(struct drm_gem_object *obj)
struct msm_drm_private *priv = obj->dev->dev_private; struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); msm_gem_assert_locked(&msm_obj->base);
if (!msm_obj->pages) { if (!msm_obj->pages) {
GEM_WARN_ON(msm_obj->pin_count); GEM_WARN_ON(msm_obj->pin_count);
...@@ -831,7 +831,7 @@ static void update_lru(struct drm_gem_object *obj) ...@@ -831,7 +831,7 @@ static void update_lru(struct drm_gem_object *obj)
bool msm_gem_active(struct drm_gem_object *obj) bool msm_gem_active(struct drm_gem_object *obj)
{ {
GEM_WARN_ON(!msm_gem_is_locked(obj)); msm_gem_assert_locked(obj);
if (to_msm_bo(obj)->pin_count) if (to_msm_bo(obj)->pin_count)
return true; return true;
......
...@@ -215,6 +215,12 @@ msm_gem_is_locked(struct drm_gem_object *obj) ...@@ -215,6 +215,12 @@ msm_gem_is_locked(struct drm_gem_object *obj)
return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0); return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0);
} }
static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
GEM_WARN_ON(!msm_gem_is_locked(obj));
}
/* imported/exported objects are not purgeable: */ /* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj) static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{ {
...@@ -229,7 +235,7 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj) ...@@ -229,7 +235,7 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
static inline bool is_vunmapable(struct msm_gem_object *msm_obj) static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{ {
GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); msm_gem_assert_locked(&msm_obj->base);
return (msm_obj->vmap_count == 0) && msm_obj->vaddr; return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment