Commit e5ffa727 authored by Thierry Reding, committed by Ben Skeggs

drm/nouveau/imem/gk20a: Turn instmem lock into mutex

The gk20a implementation of instance memory uses vmap()/vunmap() to map
memory regions into the kernel's virtual address space. These functions
may sleep, so protecting them by a spin lock is not safe. This triggers
a warning if the DEBUG_ATOMIC_SLEEP Kconfig option is enabled. Fix this
by using a mutex instead.
Signed-off-by: Thierry Reding <treding@nvidia.com>
Reviewed-by: Alexandre Courbot <acourbot@nvidia.com>
Tested-by: Alexandre Courbot <acourbot@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 2ebd42bc
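For context, a minimal sketch of the locking pattern this patch moves to. It is not taken from the patch; the structure and function names (demo_instmem, demo_acquire) are hypothetical. It shows a mutex, which is a sleeping lock, protecting a section that calls vmap(); vmap() may sleep, so calling it under a spinlock is invalid and is reported when DEBUG_ATOMIC_SLEEP is enabled.

/*
 * Illustrative sketch only -- demo_instmem and demo_acquire are hypothetical
 * names, not part of the patch.  A mutex (sleeping lock) guards a critical
 * section that calls vmap(), which may sleep.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

struct demo_instmem {
	struct mutex lock;	/* was a spinlock_t before the change */
	void *vaddr;		/* cached CPU mapping, protected by @lock */
};

static void *demo_acquire(struct demo_instmem *imem, struct page **pages,
			  unsigned int npages)
{
	mutex_lock(&imem->lock);	/* may sleep: safe in process context */
	if (!imem->vaddr)
		imem->vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	mutex_unlock(&imem->lock);

	return imem->vaddr;
}

With a spinlock the same vmap() call would run with preemption disabled, which is exactly the condition the commit message describes.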
@@ -94,7 +94,7 @@ struct gk20a_instmem {
 	struct nvkm_instmem base;
 
 	/* protects vaddr_* and gk20a_instobj::vaddr* */
-	spinlock_t lock;
+	struct mutex lock;
 
 	/* CPU mappings LRU */
 	unsigned int vaddr_use;
@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
 	const u64 size = nvkm_memory_size(memory);
-	unsigned long flags;
 
 	nvkm_ltc_flush(ltc);
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	if (node->base.vaddr) {
 		if (!node->use_cpt) {
@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 
 out:
 	node->use_cpt++;
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	return node->base.vaddr;
 }
@@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
-	unsigned long flags;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* we should at least have one user to release... */
 	if (WARN_ON(node->use_cpt == 0))
@@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
 
 out:
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	wmb();
 	nvkm_ltc_invalidate(ltc);
@@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
 	struct nvkm_mm_node *r = node->base.mem.mem;
-	unsigned long flags;
 	int i;
 
 	if (unlikely(!r))
 		goto out;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* vaddr has already been recycled */
 	if (node->base.vaddr)
 		gk20a_instobj_iommu_recycle_vaddr(node);
 
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	/* clear IOMMU bit to unmap pages */
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
@@ -571,7 +568,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
-	spin_lock_init(&imem->lock);
+	mutex_init(&imem->lock);
 	*pimem = &imem->base;
 
 	/* do not allow more than 1MB of CPU-mapped instmem */