Commit 5ec69c91 authored by Ben Skeggs

drm/nouveau/mmu: serialise mmu invalidations with private mutex

nvkm_subdev.mutex is going away.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent dbffdff7
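
The change applies one pattern across every file touched below: each MMU invalidation path previously serialised on the subdev-wide nvkm_subdev.mutex, and now serialises on a mutex private to struct nvkm_mmu. A minimal before/after sketch, condensed from the hunks that follow (not a complete function):

    /* Before: flushes contended with every other user of the subdev lock. */
    struct nvkm_subdev *subdev = &vmm->mmu->subdev;
    mutex_lock(&subdev->mutex);
    /* ... write invalidate registers, poll for completion ... */
    mutex_unlock(&subdev->mutex);

    /* After: flushes serialise only against each other. */
    mutex_lock(&vmm->mmu->mutex);
    /* ... write invalidate registers, poll for completion ... */
    mutex_unlock(&vmm->mmu->mutex);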
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h

@@ -117,6 +117,8 @@ struct nvkm_mmu {
 		struct list_head list;
 	} ptc, ptp;
 
+	struct mutex mutex; /* serialises mmu invalidations */
+
 	struct nvkm_device_oclass user;
 };
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c

@@ -402,6 +402,7 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 	nvkm_vmm_unref(&mmu->vmm);
 
 	nvkm_mmu_ptc_fini(mmu);
+	mutex_destroy(&mmu->mutex);
 	return mmu;
 }
@@ -420,6 +421,7 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
 	mmu->func = func;
 	mmu->dma_bits = func->dma_bits;
 	nvkm_mmu_ptc_init(mmu);
+	mutex_init(&mmu->mutex);
 	mmu->user.ctor = nvkm_ummu_new;
 	mmu->user.base = func->mmu.user;
 }
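
The new lock follows the usual kernel lifetime pairing: mutex_init() in the constructor before any invalidation can run, mutex_destroy() in the destructor once no user remains. A condensed sketch of the pairing (the example_* names are hypothetical, not driver functions):

    void example_ctor(struct nvkm_mmu *mmu)
    {
        /* Must happen before any *_vmm_flush()/gf100_vmm_invalidate() call. */
        mutex_init(&mmu->mutex);
    }

    void example_dtor(struct nvkm_mmu *mmu)
    {
        /* Legal only once no flush path can still take the lock. */
        mutex_destroy(&mmu->mutex);
    }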
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c

@@ -187,12 +187,11 @@ gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
 void
 gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
 	u64 addr = 0;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	/* Looks like maybe a "free flush slots" counter, the
 	 * faster you write to 0x100cbc the more it decreases.
 	 */
@@ -222,7 +221,7 @@ gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
 		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
 			break;
 	);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 void
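
For reference, nvkm_msec() above is nvkm's bounded-poll helper: it keeps evaluating its body until the break condition fires or the millisecond budget expires. A simplified plain-C equivalent of the completion poll performed under the held mutex (poll_flush_done() is a hypothetical helper, not driver code):

    #include <linux/delay.h>

    /* Poll bit 15 of 0x100c80 until the flush completes or ~2000ms elapse. */
    static bool poll_flush_done(struct nvkm_device *device)
    {
        int ms;
        for (ms = 0; ms < 2000; ms++) {
            if (nvkm_rd32(device, 0x100c80) & 0x00008000)
                return true;
            usleep_range(1000, 2000); /* roughly 1ms per iteration */
        }
        return false; /* timed out; caller still drops the mutex */
    }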
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c

@@ -80,17 +80,16 @@ nv41_vmm_desc_12[] = {
 static void
 nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	nvkm_wr32(device, 0x100810, 0x00000022);
 	nvkm_msec(device, 2000,
 		if (nvkm_rd32(device, 0x100810) & 0x00000020)
 			break;
 	);
 	nvkm_wr32(device, 0x100810, 0x00000000);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 static const struct nvkm_vmm_func
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c

@@ -184,7 +184,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 	struct nvkm_device *device = subdev->device;
 	int i, id;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
 		if (!atomic_read(&vmm->engref[i]))
 			continue;

@@ -220,7 +220,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 			nvkm_error(subdev, "%s mmu invalidate timeout\n",
 				   nvkm_subdev_name[i]);
 	}
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 int
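
nv50_vmm_flush() is the one path that keeps its subdev local, since nvkm_error() still needs it for the timeout message; only the lock and unlock calls move to mmu->mutex. The resulting shape, condensed from the two hunks above:

    struct nvkm_subdev *subdev = &vmm->mmu->subdev; /* kept for logging */

    mutex_lock(&vmm->mmu->mutex);
    for (i = 0; i < NVKM_SUBDEV_NR; i++) {
        if (!atomic_read(&vmm->engref[i]))
            continue;
        /* ... issue the per-engine invalidate; on timeout:
         *     nvkm_error(subdev, "%s mmu invalidate timeout\n", ...);
         */
    }
    mutex_unlock(&vmm->mmu->mutex);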
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c

@@ -26,15 +26,14 @@
 static void
 tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 	u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
 
 	type |= 0x00000001; /* PAGE_ALL */
 	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
 		type |= 0x00000004; /* HUB_ONLY */
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
 	nvkm_wr32(device, 0xb830a4, 0x00000000);

@@ -46,7 +45,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 			break;
 	);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 static const struct nvkm_vmm_func
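
The Turing hunk also shows how the invalidate type word is packed. A worked example for depth == 0 (flush every page-table level), following the bit comments above:

    u32 type = (5 - 0) << 24; /* 0x05000000: CACHE_LEVEL_UP_TO_PDE3 */
    type |= 0x00000001;       /* PAGE_ALL */
    type |= 0x00000004;       /* HUB_ONLY, only while BAR holds a reference */
    /* => type == 0x05000005 when the BAR engine is active. */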