Commit 0e08270a authored by Sushmita Susheelendra, committed by Rob Clark

drm/msm: Separate locking of buffer resources from struct_mutex

Buffer-object-specific resources like pages, domains, and the sg list
need not be protected with struct_mutex; they can be protected with a
buffer-object-level lock. This simplifies locking and makes it easier
to avoid potential recursive locking scenarios for SVM involving
mmap_sem and struct_mutex. It also removes unnecessary serialization
when creating buffer objects, and between buffer object creation and
GPU command submission.
Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
[robclark: squash in handling new locking for shrinker]
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 816fa34c
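Editor's note: to make the effect of the change concrete, here is a small caller-side sketch. It is illustrative only and not part of the patch; the helper name example_alloc_and_map() is invented, while the msm_gem_* calls are the APIs as they exist after this commit (error unwinding is elided for brevity).

/* Illustrative sketch (not part of the patch): with per-buffer-object
 * locking, a caller can allocate, pin and map a BO without ever taking
 * the device-global struct_mutex; the msm_gem_* helpers take
 * msm_obj->lock internally.  msm_gem_new_locked() remains for the few
 * paths that already hold struct_mutex (e.g. GPU init).
 */
static void *example_alloc_and_map(struct drm_device *dev,
		struct msm_gem_address_space *aspace,
		uint32_t size, uint64_t *iova)
{
	struct drm_gem_object *bo;
	int ret;

	bo = msm_gem_new(dev, size, MSM_BO_UNCACHED);	/* no struct_mutex held */
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	ret = msm_gem_get_iova(bo, aspace, iova);	/* takes msm_obj->lock internally */
	if (ret) {
		drm_gem_object_unreference_unlocked(bo);
		return ERR_PTR(ret);
	}

	return msm_gem_get_vaddr(bo);			/* ditto */
}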
@@ -297,18 +297,18 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 	struct drm_gem_object *bo;
 	void *ptr;
-	bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
+	bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
 	if (IS_ERR(bo))
 		return bo;
-	ptr = msm_gem_get_vaddr_locked(bo);
+	ptr = msm_gem_get_vaddr(bo);
 	if (!ptr) {
 		drm_gem_object_unreference(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 	if (iova) {
-		int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova);
+		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
 		if (ret) {
 			drm_gem_object_unreference(bo);
@@ -318,7 +318,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 	memcpy(ptr, &fw->data[4], fw->size - 4);
-	msm_gem_put_vaddr_locked(bo);
+	msm_gem_put_vaddr(bo);
 	return bo;
 }
...
@@ -294,15 +294,15 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
-	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
+	a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
 	if (IS_ERR(a5xx_gpu->gpmu_bo))
 		goto err;
-	if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace,
+	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
 			&a5xx_gpu->gpmu_iova))
 		goto err;
-	ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
+	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
 	if (!ptr)
 		goto err;
@@ -321,7 +321,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 		cmds_size -= _size;
 	}
-	msm_gem_put_vaddr_locked(a5xx_gpu->gpmu_bo);
+	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
 	a5xx_gpu->gpmu_dwords = dwords;
 	goto out;
...
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 	DBG("%s", gpu->name);
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
+	ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -397,10 +397,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
-	mutex_lock(&drm->struct_mutex);
 	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
 			MSM_BO_UNCACHED);
-	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(adreno_gpu->memptrs_bo)) {
 		ret = PTR_ERR(adreno_gpu->memptrs_bo);
 		adreno_gpu->memptrs_bo = NULL;
...
@@ -982,18 +982,16 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 	uint64_t iova;
 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-		mutex_lock(&dev->struct_mutex);
 		msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
 		if (IS_ERR(msm_host->tx_gem_obj)) {
 			ret = PTR_ERR(msm_host->tx_gem_obj);
 			pr_err("%s: failed to allocate gem, %d\n",
 				__func__, ret);
 			msm_host->tx_gem_obj = NULL;
-			mutex_unlock(&dev->struct_mutex);
 			return ret;
 		}
-		ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
+		ret = msm_gem_get_iova(msm_host->tx_gem_obj,
 				priv->kms->aspace, &iova);
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
...
@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc)
 		if (next_bo) {
 			/* take a obj ref + iova ref when we start scanning out: */
 			drm_gem_object_reference(next_bo);
-			msm_gem_get_iova_locked(next_bo, kms->aspace, &iova);
+			msm_gem_get_iova(next_bo, kms->aspace, &iova);
 			/* enable cursor: */
 			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
...
@@ -528,9 +528,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
-	mutex_lock(&dev->struct_mutex);
 	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
-	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
 		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
 		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
...
@@ -336,6 +336,7 @@ static int msm_init_vram(struct drm_device *dev)
 		priv->vram.size = size;
 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+		spin_lock_init(&priv->vram.lock);
 		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
 		attrs |= DMA_ATTR_WRITE_COMBINE;
...
@@ -149,6 +149,7 @@ struct msm_drm_private {
 		 * and position mm_node->start is in # of pages:
 		 */
 		struct drm_mm mm;
+		spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	} vram;
 	struct notifier_block vmap_notifier;
@@ -198,8 +199,6 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
@@ -221,13 +220,9 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_get_vaddr(struct drm_gem_object *obj);
-void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-void msm_gem_purge(struct drm_gem_object *obj);
-void msm_gem_vunmap(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -240,6 +235,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		uint32_t size, uint32_t flags, uint32_t *handle);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
...
@@ -97,10 +97,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	/* allocate backing bo */
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
-	mutex_lock(&dev->struct_mutex);
 	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
 			MSM_BO_WC | MSM_BO_STOLEN);
-	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(fbdev->bo)) {
 		ret = PTR_ERR(fbdev->bo);
 		fbdev->bo = NULL;
@@ -126,7 +124,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	 * in panic (ie. lock-safe, etc) we could avoid pinning the
 	 * buffer now:
 	 */
-	ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr);
+	ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
 	if (ret) {
 		dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
 		goto fail_unlock;
@@ -155,7 +153,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	dev->mode_config.fb_base = paddr;
-	fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
+	fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
 	if (IS_ERR(fbi->screen_base)) {
 		ret = PTR_ERR(fbi->screen_base);
 		goto fail_unlock;
...
@@ -26,6 +26,9 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -41,8 +44,7 @@ static bool use_pages(struct drm_gem_object *obj)
 }
 /* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj,
-		int npages)
+static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_drm_private *priv = obj->dev->dev_private;
@@ -54,7 +56,9 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
+	spin_lock(&priv->vram.lock);
 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
+	spin_unlock(&priv->vram.lock);
 	if (ret) {
 		kvfree(p);
 		return ERR_PTR(ret);
@@ -69,7 +73,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	return p;
 }
-/* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -109,6 +112,18 @@ static struct page **get_pages(struct drm_gem_object *obj)
 	return msm_obj->pages;
 }
+static void put_pages_vram(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	spin_lock(&priv->vram.lock);
+	drm_mm_remove_node(msm_obj->vram_node);
+	spin_unlock(&priv->vram.lock);
+
+	kvfree(msm_obj->pages);
+}
+
 static void put_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -125,10 +140,8 @@ static void put_pages(struct drm_gem_object *obj)
 		if (use_pages(obj))
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
-		else {
-			drm_mm_remove_node(msm_obj->vram_node);
-			kvfree(msm_obj->pages);
-		}
+		else
+			put_pages_vram(obj);
 		msm_obj->pages = NULL;
 	}
@@ -136,11 +149,18 @@ static void put_pages(struct drm_gem_object *obj)
 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **p;
-	mutex_lock(&dev->struct_mutex);
+
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return ERR_PTR(-EBUSY);
+	}
+
 	p = get_pages(obj);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return p;
 }
@@ -195,28 +215,25 @@ int msm_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
 	int ret;
-	/* This should only happen if userspace tries to pass a mmap'd
-	 * but unfaulted gem bo vaddr into submit ioctl, triggering
-	 * a page fault while struct_mutex is already held.  This is
-	 * not a valid use-case so just bail.
-	 */
-	if (priv->struct_mutex_task == current)
-		return VM_FAULT_SIGBUS;
-
-	/* Make sure we don't parallel update on a fault, nor move or remove
-	 * something from beneath our feet
+	/*
+	 * vm_ops.open/drm_gem_mmap_obj and close get and put
+	 * a reference on obj. So, we dont need to hold one here.
 	 */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&msm_obj->lock);
 	if (ret)
 		goto out;
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return VM_FAULT_SIGBUS;
+	}
+
 	/* make sure we have pages attached now */
 	pages = get_pages(obj);
 	if (IS_ERR(pages)) {
@@ -235,7 +252,7 @@ int msm_gem_fault(struct vm_fault *vmf)
 	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 out_unlock:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 out:
 	switch (ret) {
 	case -EAGAIN:
@@ -259,9 +276,10 @@ int msm_gem_fault(struct vm_fault *vmf)
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 	/* Make it mmapable */
 	ret = drm_gem_create_mmap_offset(obj);
@@ -277,9 +295,11 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
-	mutex_lock(&obj->dev->struct_mutex);
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock(&msm_obj->lock);
 	offset = mmap_offset(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return offset;
 }
@@ -289,6 +309,8 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
@@ -306,7 +328,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 	list_for_each_entry(vma, &msm_obj->vmas, list) {
 		if (vma->aspace == aspace)
@@ -325,13 +347,14 @@ static void del_vma(struct msm_gem_vma *vma)
 	kfree(vma);
 }
+/* Called with msm_obj->lock locked */
 static void
 put_iova(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma, *tmp;
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
@@ -339,21 +362,20 @@ put_iova(struct drm_gem_object *obj)
 	}
 }
-/* should be called under struct_mutex.. although it can be called
- * from atomic context without struct_mutex to acquire an extra
- * iova ref if you know one is already held.
- *
- * That means when I do eventually need to add support for unpinning
- * the refcnt counter needs to be atomic_t.
- */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+/* get iova, taking a reference.  Should have a matching put */
+int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return -EBUSY;
+	}
 	vma = lookup_vma(obj, aspace);
@@ -377,24 +399,14 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 	}
 	*iova = vma->iova;
+	mutex_unlock(&msm_obj->lock);
 	return 0;
 fail:
 	del_vma(vma);
-	return ret;
-}
-
-/* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova)
-{
-	int ret;
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_iova_locked(obj, aspace, iova);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
@@ -404,11 +416,12 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
-	mutex_lock(&obj->dev->struct_mutex);
+	mutex_lock(&msm_obj->lock);
 	vma = lookup_vma(obj, aspace);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	WARN_ON(!vma);
 	return vma ? vma->iova : 0;
@@ -455,45 +468,57 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 	return ret;
 }
-void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	int ret = 0;
+
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* increment vmap_count *before* vmap() call, so shrinker can
+	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+	 * This guarantees that we won't try to msm_gem_vunmap() this
+	 * same object from within the vmap() call (while we already
+	 * hold msm_obj->lock)
+	 */
+	msm_obj->vmap_count++;
+
 	if (!msm_obj->vaddr) {
 		struct page **pages = get_pages(obj);
-		if (IS_ERR(pages))
-			return ERR_CAST(pages);
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-		if (msm_obj->vaddr == NULL)
-			return ERR_PTR(-ENOMEM);
+		if (msm_obj->vaddr == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
 	}
-	msm_obj->vmap_count++;
+
+	mutex_unlock(&msm_obj->lock);
 	return msm_obj->vaddr;
-}
-void *msm_gem_get_vaddr(struct drm_gem_object *obj)
-{
-	void *ret;
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_vaddr_locked(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
-	return ret;
+fail:
+	msm_obj->vmap_count--;
+	mutex_unlock(&msm_obj->lock);
+	return ERR_PTR(ret);
 }
-void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+	mutex_lock(&msm_obj->lock);
 	WARN_ON(msm_obj->vmap_count < 1);
 	msm_obj->vmap_count--;
-}
-
-void msm_gem_put_vaddr(struct drm_gem_object *obj)
-{
-	mutex_lock(&obj->dev->struct_mutex);
-	msm_gem_put_vaddr_locked(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 }
 /* Update madvise status, returns true if not purged, else
@@ -503,15 +528,21 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	mutex_lock(&msm_obj->lock);
+
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 	if (msm_obj->madv != __MSM_MADV_PURGED)
 		msm_obj->madv = madv;
-	return (msm_obj->madv != __MSM_MADV_PURGED);
+	madv = msm_obj->madv;
+
+	mutex_unlock(&msm_obj->lock);
+
+	return (madv != __MSM_MADV_PURGED);
 }
-void msm_gem_purge(struct drm_gem_object *obj)
+void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -520,9 +551,11 @@ void msm_gem_purge(struct drm_gem_object *obj)
 	WARN_ON(!is_purgeable(msm_obj));
 	WARN_ON(obj->import_attach);
+	mutex_lock_nested(&msm_obj->lock, subclass);
+
 	put_iova(obj);
-	msm_gem_vunmap(obj);
+	msm_gem_vunmap_locked(obj);
 	put_pages(obj);
@@ -540,12 +573,16 @@ void msm_gem_purge(struct drm_gem_object *obj)
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 			0, (loff_t)-1);
+
+	mutex_unlock(&msm_obj->lock);
 }
-void msm_gem_vunmap(struct drm_gem_object *obj)
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
@@ -553,6 +590,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
+void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock_nested(&msm_obj->lock, subclass);
+	msm_gem_vunmap_locked(obj);
+	mutex_unlock(&msm_obj->lock);
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
@@ -674,7 +720,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	mutex_lock(&msm_obj->lock);
 	switch (msm_obj->madv) {
 	case __MSM_MADV_PURGED:
@@ -715,6 +761,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	if (fence)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
+
+	mutex_unlock(&msm_obj->lock);
 }
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -747,6 +795,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	list_del(&msm_obj->mm_list);
+	mutex_lock(&msm_obj->lock);
+
 	put_iova(obj);
 	if (obj->import_attach) {
@@ -761,7 +811,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
-		msm_gem_vunmap(obj);
+		msm_gem_vunmap_locked(obj);
 		put_pages(obj);
 	}
@@ -770,6 +820,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	drm_gem_object_release(obj);
+	mutex_unlock(&msm_obj->lock);
 	kfree(msm_obj);
 }
@@ -780,14 +831,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	struct drm_gem_object *obj;
 	int ret;
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	obj = msm_gem_new(dev, size, flags);
-	mutex_unlock(&dev->struct_mutex);
-
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
@@ -802,13 +847,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
 		struct reservation_object *resv,
-		struct drm_gem_object **obj)
+		struct drm_gem_object **obj,
+		bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
 	case MSM_BO_CACHED:
@@ -824,6 +868,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	if (!msm_obj)
 		return -ENOMEM;
+	mutex_init(&msm_obj->lock);
+
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
@@ -837,23 +883,28 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->vmas);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	if (struct_mutex_locked) {
+		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
+	}
 	*obj = &msm_obj->base;
 	return 0;
 }
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-		uint32_t size, uint32_t flags)
+static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_gem_object *obj = NULL;
 	bool use_vram = false;
 	int ret;
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	size = PAGE_ALIGN(size);
 	if (!iommu_present(&platform_bus_type))
@@ -870,7 +921,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	if (size == 0)
 		return ERR_PTR(-EINVAL);
-	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
+	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
 	if (ret)
 		goto fail;
@@ -904,10 +955,22 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	return obj;
 fail:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference_unlocked(obj);
 	return ERR_PTR(ret);
 }
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	return _msm_gem_new(dev, size, flags, true);
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	return _msm_gem_new(dev, size, flags, false);
+}
+
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
@@ -924,11 +987,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	size = PAGE_ALIGN(dmabuf->size);
-	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
-	mutex_lock(&dev->struct_mutex);
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
-	mutex_unlock(&dev->struct_mutex);
-
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
 	if (ret)
 		goto fail;
@@ -937,17 +996,22 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	npages = size / PAGE_SIZE;
 	msm_obj = to_msm_bo(obj);
+	mutex_lock(&msm_obj->lock);
 	msm_obj->sgt = sgt;
 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!msm_obj->pages) {
+		mutex_unlock(&msm_obj->lock);
 		ret = -ENOMEM;
 		goto fail;
 	}
 	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&msm_obj->lock);
 		goto fail;
+	}
+	mutex_unlock(&msm_obj->lock);
 	return obj;
 fail:
...
@@ -31,6 +31,7 @@ struct msm_gem_address_space {
 	 * and position mm_node->start is in # of pages:
 	 */
 	struct drm_mm mm;
+	spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	struct msm_mmu *mmu;
 	struct kref kref;
 };
@@ -89,6 +90,7 @@ struct msm_gem_object {
 	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
+	struct mutex lock; /* Protects resources associated with bo */
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -99,6 +101,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
 {
+	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
 	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
 			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
 }
@@ -108,6 +111,25 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
 	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
 }
+/* The shrinker can be triggered while we hold objA->lock, and need
+ * to grab objB->lock to purge it.  Lockdep just sees these as a single
+ * class of lock, so we use subclasses to teach it the difference.
+ *
+ * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
+ * OBJ_LOCK_SHRINKER is used by shrinker.
+ *
+ * It is *essential* that we never go down paths that could trigger the
+ * shrinker for a purgable object.  This is ensured by checking that
+ * msm_obj->madv == MSM_MADV_WILLNEED.
+ */
+enum msm_gem_lock {
+	OBJ_LOCK_NORMAL,
+	OBJ_LOCK_SHRINKER,
+};
+
+void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
  * associated with the cmdstream submission for synchronization (and
  * make it easier to unwind when things go wrong, etc).  This only
...
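For reference, a minimal sketch of how the OBJ_LOCK_SHRINKER subclass declared above is meant to be applied. This is illustrative only and not part of the patch; the function name is invented, while the real users are msm_gem_purge() and msm_gem_vunmap() in the hunks above and below.

/* Illustrative only: acquiring an object lock from the shrinker path.
 * mutex_lock_nested() takes the same mutex, but the OBJ_LOCK_SHRINKER
 * subclass tells lockdep that nesting under another object's
 * OBJ_LOCK_NORMAL lock is intentional rather than a deadlock.
 */
static void example_shrinker_purge(struct msm_gem_object *msm_obj)
{
	mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);

	/* ... drop pages / vunmap here, as msm_gem_purge() does ... */

	mutex_unlock(&msm_obj->lock);
}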
@@ -20,6 +20,18 @@
 static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 {
+	/* NOTE: we are *closer* to being able to get rid of
+	 * mutex_trylock_recursive().. the msm_gem code itself does
+	 * not need struct_mutex, although codepaths that can trigger
+	 * shrinker are still called in code-paths that hold the
+	 * struct_mutex.
+	 *
+	 * Also, msm_obj->madv is protected by struct_mutex.
+	 *
+	 * The next step is probably split out a seperate lock for
+	 * protecting inactive_list, so that shrinker does not need
+	 * struct_mutex.
+	 */
 	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
 	case MUTEX_TRYLOCK_FAILED:
 		return false;
@@ -77,7 +89,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		if (freed >= sc->nr_to_scan)
 			break;
 		if (is_purgeable(msm_obj)) {
-			msm_gem_purge(&msm_obj->base);
+			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			freed += msm_obj->base.size >> PAGE_SHIFT;
 		}
 	}
@@ -106,7 +118,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (is_vunmapable(msm_obj)) {
-			msm_gem_vunmap(&msm_obj->base);
+			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			/* since we don't know any better, lets bail after a few
 			 * and if necessary the shrinker will be invoked again.
 			 * Seems better than unmapping *everything*
...
@@ -245,7 +245,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 		uint64_t iova;
 		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova_locked(&msm_obj->base,
+		ret = msm_gem_get_iova(&msm_obj->base,
 				submit->gpu->aspace, &iova);
 		if (ret)
@@ -301,7 +301,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 	/* For now, just map the entire thing.  Eventually we probably
 	 * to do it page-by-page, w/ kmap() if not vmap()d..
 	 */
-	ptr = msm_gem_get_vaddr_locked(&obj->base);
+	ptr = msm_gem_get_vaddr(&obj->base);
 	if (IS_ERR(ptr)) {
 		ret = PTR_ERR(ptr);
@@ -359,7 +359,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 	}
 out:
-	msm_gem_put_vaddr_locked(&obj->base);
+	msm_gem_put_vaddr(&obj->base);
 	return ret;
 }
...
@@ -50,7 +50,9 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
 	}
+	spin_lock(&aspace->lock);
 	drm_mm_remove_node(&vma->node);
+	spin_unlock(&aspace->lock);
 	vma->iova = 0;
@@ -63,10 +65,15 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 {
 	int ret;
-	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+	spin_lock(&aspace->lock);
+	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
+		spin_unlock(&aspace->lock);
 		return 0;
+	}
 	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
+	spin_unlock(&aspace->lock);
+
 	if (ret)
 		return ret;
@@ -94,6 +101,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 	if (!aspace)
 		return ERR_PTR(-ENOMEM);
+	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = msm_iommu_new(dev, domain);
...
@@ -497,7 +497,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_reference(&msm_obj->base);
-		msm_gem_get_iova_locked(&msm_obj->base,
+		msm_gem_get_iova(&msm_obj->base,
 				submit->gpu->aspace, &iova);
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -661,9 +661,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}
 	/* Create ringbuffer: */
-	mutex_lock(&drm->struct_mutex);
 	gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
-	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->rb)) {
 		ret = PTR_ERR(gpu->rb);
 		gpu->rb = NULL;
...
@@ -268,7 +268,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
 	struct msm_gem_object *obj = submit->bos[idx].obj;
 	const char *buf;
-	buf = msm_gem_get_vaddr_locked(&obj->base);
+	buf = msm_gem_get_vaddr(&obj->base);
 	if (IS_ERR(buf))
 		return;
@@ -283,7 +283,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
 			(uint32_t[3]){ iova, size, iova >> 32 }, 12);
 	rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
-	msm_gem_put_vaddr_locked(&obj->base);
+	msm_gem_put_vaddr(&obj->base);
 }
 /* called under struct_mutex */
...
@@ -40,7 +40,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 		goto fail;
 	}
-	ring->start = msm_gem_get_vaddr_locked(ring->bo);
+	ring->start = msm_gem_get_vaddr(ring->bo);
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
 		goto fail;
...