Commit 0e08270a authored by Sushmita Susheelendra, committed by Rob Clark

drm/msm: Separate locking of buffer resources from struct_mutex

Buffer-object-specific resources such as pages, domains, and the sg
list need not be protected with struct_mutex; they can be protected
with a per-buffer-object lock instead. This simplifies locking and
makes it easier to avoid potential recursive locking scenarios for
SVM involving mmap_sem and struct_mutex. It also removes unnecessary
serialization when creating buffer objects, and between buffer object
creation and GPU command submission.
Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
[robclark: squash in handling new locking for shrinker]
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 816fa34c
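
The pattern visible throughout the call sites below: the *_locked helper variants, which required the caller to hold dev->struct_mutex, are replaced by plain msm_gem_get_iova()/msm_gem_get_vaddr() calls that serialize on the new per-object lock internally, and an msm_gem_new_locked() allocation variant is added for the GPU init paths. A minimal sketch of that wrapper idea, assuming the msm_obj->lock field added in msm_gem.h below; this is an illustration, not the verbatim body of the collapsed core diff:

	/* Sketch only: the exported accessor takes the per-BO lock, so callers
	 * no longer need dev->struct_mutex just to pin a buffer for the GPU.
	 */
	int msm_gem_get_iova(struct drm_gem_object *obj,
			struct msm_gem_address_space *aspace, uint64_t *iova)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);
		int ret;

		mutex_lock(&msm_obj->lock);
		/* internal helper assumed; does the actual page/IOMMU work */
		ret = msm_gem_get_iova_locked(obj, aspace, iova);
		mutex_unlock(&msm_obj->lock);

		return ret;
	}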
@@ -297,18 +297,18 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 	struct drm_gem_object *bo;
 	void *ptr;
-	bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
+	bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
 	if (IS_ERR(bo))
 		return bo;
-	ptr = msm_gem_get_vaddr_locked(bo);
+	ptr = msm_gem_get_vaddr(bo);
 	if (!ptr) {
 		drm_gem_object_unreference(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 	if (iova) {
-		int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova);
+		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
 		if (ret) {
 			drm_gem_object_unreference(bo);
@@ -318,7 +318,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 	memcpy(ptr, &fw->data[4], fw->size - 4);
-	msm_gem_put_vaddr_locked(bo);
+	msm_gem_put_vaddr(bo);
 	return bo;
 }
...
@@ -294,15 +294,15 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
-	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
+	a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
 	if (IS_ERR(a5xx_gpu->gpmu_bo))
 		goto err;
-	if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace,
+	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
 			&a5xx_gpu->gpmu_iova))
 		goto err;
-	ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
+	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
 	if (!ptr)
 		goto err;
@@ -321,7 +321,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 		cmds_size -= _size;
 	}
-	msm_gem_put_vaddr_locked(a5xx_gpu->gpmu_bo);
+	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
 	a5xx_gpu->gpmu_dwords = dwords;
 	goto out;
...
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 	DBG("%s", gpu->name);
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
+	ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -397,10 +397,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
-	mutex_lock(&drm->struct_mutex);
 	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
 			MSM_BO_UNCACHED);
-	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(adreno_gpu->memptrs_bo)) {
 		ret = PTR_ERR(adreno_gpu->memptrs_bo);
 		adreno_gpu->memptrs_bo = NULL;
...
@@ -982,18 +982,16 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 	uint64_t iova;
 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-		mutex_lock(&dev->struct_mutex);
 		msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
 		if (IS_ERR(msm_host->tx_gem_obj)) {
 			ret = PTR_ERR(msm_host->tx_gem_obj);
 			pr_err("%s: failed to allocate gem, %d\n",
 				__func__, ret);
 			msm_host->tx_gem_obj = NULL;
-			mutex_unlock(&dev->struct_mutex);
 			return ret;
 		}
-		ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
+		ret = msm_gem_get_iova(msm_host->tx_gem_obj,
 			priv->kms->aspace, &iova);
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
...
@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc)
 		if (next_bo) {
 			/* take a obj ref + iova ref when we start scanning out: */
 			drm_gem_object_reference(next_bo);
-			msm_gem_get_iova_locked(next_bo, kms->aspace, &iova);
+			msm_gem_get_iova(next_bo, kms->aspace, &iova);
 			/* enable cursor: */
 			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
...
@@ -528,9 +528,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
-	mutex_lock(&dev->struct_mutex);
 	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
-	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
 		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
 		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
...
@@ -336,6 +336,7 @@ static int msm_init_vram(struct drm_device *dev)
 		priv->vram.size = size;
 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+		spin_lock_init(&priv->vram.lock);
 		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
 		attrs |= DMA_ATTR_WRITE_COMBINE;
...
@@ -149,6 +149,7 @@ struct msm_drm_private {
 		 * and position mm_node->start is in # of pages:
 		 */
 		struct drm_mm mm;
+		spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	} vram;
 	struct notifier_block vmap_notifier;
@@ -198,8 +199,6 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
@@ -221,13 +220,9 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_get_vaddr(struct drm_gem_object *obj);
-void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-void msm_gem_purge(struct drm_gem_object *obj);
-void msm_gem_vunmap(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -240,6 +235,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		uint32_t size, uint32_t flags, uint32_t *handle);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
...
@@ -97,10 +97,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	/* allocate backing bo */
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
-	mutex_lock(&dev->struct_mutex);
 	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
 			MSM_BO_WC | MSM_BO_STOLEN);
-	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(fbdev->bo)) {
 		ret = PTR_ERR(fbdev->bo);
 		fbdev->bo = NULL;
@@ -126,7 +124,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	 * in panic (ie. lock-safe, etc) we could avoid pinning the
 	 * buffer now:
 	 */
-	ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr);
+	ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
 	if (ret) {
 		dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
 		goto fail_unlock;
@@ -155,7 +153,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	dev->mode_config.fb_base = paddr;
-	fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
+	fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
 	if (IS_ERR(fbi->screen_base)) {
 		ret = PTR_ERR(fbi->screen_base);
 		goto fail_unlock;
...
This diff is collapsed.
@@ -31,6 +31,7 @@ struct msm_gem_address_space {
 	 * and position mm_node->start is in # of pages:
 	 */
 	struct drm_mm mm;
+	spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	struct msm_mmu *mmu;
 	struct kref kref;
 };
@@ -89,6 +90,7 @@ struct msm_gem_object {
 	 * an IOMMU. Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
+	struct mutex lock; /* Protects resources associated with bo */
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -99,6 +101,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
 {
+	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
 	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
 			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
 }
@@ -108,6 +111,25 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
 	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
 }
+/* The shrinker can be triggered while we hold objA->lock, and need
+ * to grab objB->lock to purge it. Lockdep just sees these as a single
+ * class of lock, so we use subclasses to teach it the difference.
+ *
+ * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
+ * OBJ_LOCK_SHRINKER is used by shrinker.
+ *
+ * It is *essential* that we never go down paths that could trigger the
+ * shrinker for a purgeable object. This is ensured by checking that
+ * msm_obj->madv == MSM_MADV_WILLNEED.
+ */
+enum msm_gem_lock {
+	OBJ_LOCK_NORMAL,
+	OBJ_LOCK_SHRINKER,
+};
+void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
  * associated with the cmdstream submission for synchronization (and
  * make it easier to unwind when things go wrong, etc). This only
...
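
The subclass enum only matters at lock-acquisition time. A plausible sketch of how the purge path consumes it (the real implementation lives in the collapsed diff above; this is an assumed shape only), using the kernel's mutex_lock_nested() to hand the subclass to lockdep:

	/* Sketch (assumed, simplified): the shrinker passes OBJ_LOCK_SHRINKER so
	 * lockdep can tell "grab objB->lock to purge it while objA->lock is held"
	 * apart from an ordinary recursive acquisition of the same lock class.
	 */
	void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock_nested(&msm_obj->lock, subclass);
		/* ... put pages, free the sg table, vunmap, mark the BO purged ... */
		mutex_unlock(&msm_obj->lock);
	}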
@@ -20,6 +20,18 @@
 static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 {
+	/* NOTE: we are *closer* to being able to get rid of
+	 * mutex_trylock_recursive().. the msm_gem code itself does
+	 * not need struct_mutex, although codepaths that can trigger
+	 * shrinker are still called in code-paths that hold the
+	 * struct_mutex.
+	 *
+	 * Also, msm_obj->madv is protected by struct_mutex.
+	 *
+	 * The next step is probably to split out a separate lock for
+	 * protecting inactive_list, so that the shrinker does not need
+	 * struct_mutex.
+	 */
 	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
 	case MUTEX_TRYLOCK_FAILED:
 		return false;
@@ -77,7 +89,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		if (freed >= sc->nr_to_scan)
 			break;
 		if (is_purgeable(msm_obj)) {
-			msm_gem_purge(&msm_obj->base);
+			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			freed += msm_obj->base.size >> PAGE_SHIFT;
 		}
 	}
@@ -106,7 +118,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (is_vunmapable(msm_obj)) {
-			msm_gem_vunmap(&msm_obj->base);
+			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			/* since we don't know any better, lets bail after a few
 			 * and if necessary the shrinker will be invoked again.
 			 * Seems better than unmapping *everything*
...
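
For context, the remainder of msm_gem_shrinker_lock() (unchanged by this patch; shown here in its likely shape for readability) resolves the three mutex_trylock_recursive() outcomes so the shrinker callbacks work whether or not the current task already holds struct_mutex:

	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;		/* contended: skip shrinking this time */
	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;		/* we took it, so we must release it */
		return true;
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;	/* already held by this task: leave it */
		return true;
	}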
@@ -245,7 +245,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 		uint64_t iova;
 		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova_locked(&msm_obj->base,
+		ret = msm_gem_get_iova(&msm_obj->base,
 				submit->gpu->aspace, &iova);
 		if (ret)
@@ -301,7 +301,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 	/* For now, just map the entire thing. Eventually we probably
 	 * to do it page-by-page, w/ kmap() if not vmap()d..
 	 */
-	ptr = msm_gem_get_vaddr_locked(&obj->base);
+	ptr = msm_gem_get_vaddr(&obj->base);
 	if (IS_ERR(ptr)) {
 		ret = PTR_ERR(ptr);
@@ -359,7 +359,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 	}
 out:
-	msm_gem_put_vaddr_locked(&obj->base);
+	msm_gem_put_vaddr(&obj->base);
 	return ret;
 }
...
@@ -50,7 +50,9 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
 	}
+	spin_lock(&aspace->lock);
 	drm_mm_remove_node(&vma->node);
+	spin_unlock(&aspace->lock);
 	vma->iova = 0;
@@ -63,10 +65,15 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 {
 	int ret;
-	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+	spin_lock(&aspace->lock);
+	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
+		spin_unlock(&aspace->lock);
 		return 0;
+	}
 	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
+	spin_unlock(&aspace->lock);
 	if (ret)
 		return ret;
@@ -94,6 +101,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 	if (!aspace)
 		return ERR_PTR(-ENOMEM);
+	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = msm_iommu_new(dev, domain);
...
@@ -497,7 +497,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_reference(&msm_obj->base);
-		msm_gem_get_iova_locked(&msm_obj->base,
+		msm_gem_get_iova(&msm_obj->base,
 				submit->gpu->aspace, &iova);
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -661,9 +661,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}
 	/* Create ringbuffer: */
-	mutex_lock(&drm->struct_mutex);
 	gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
-	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->rb)) {
 		ret = PTR_ERR(gpu->rb);
 		gpu->rb = NULL;
...
@@ -268,7 +268,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
 	struct msm_gem_object *obj = submit->bos[idx].obj;
 	const char *buf;
-	buf = msm_gem_get_vaddr_locked(&obj->base);
+	buf = msm_gem_get_vaddr(&obj->base);
 	if (IS_ERR(buf))
 		return;
@@ -283,7 +283,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
 			(uint32_t[3]){ iova, size, iova >> 32 }, 12);
 	rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
-	msm_gem_put_vaddr_locked(&obj->base);
+	msm_gem_put_vaddr(&obj->base);
 }
 /* called under struct_mutex */
...
@@ -40,7 +40,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 		goto fail;
 	}
-	ring->start = msm_gem_get_vaddr_locked(ring->bo);
+	ring->start = msm_gem_get_vaddr(ring->bo);
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
 		goto fail;
...