Commit 15cace59 authored by Ben Skeggs

drm/nvc0/vm: handle bar tlb flushes internally

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent ca97a366
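
This commit moves the nvc0 BAR TLB flush out of the BAR code and into the vmmgr itself: nvc0_bar_ctor() now takes a reference in vm->engref[NVDEV_SUBDEV_BAR] for each BAR VM, and the (now static) nvc0_vm_flush() derives the flush type from that refcount instead of having callers pass it in. A condensed sketch of the resulting flush logic, reconstructed from the hunks below (the nv_wait polls on 0x100c80 around each write are elided here; see the last hunk for the full sequence):

static void
nvc0_vm_flush(struct nouveau_vm *vm)
{
	struct nvc0_vmmgr_priv *priv = (void *)vm->vmm;
	struct nouveau_vm_pgd *vpgd;
	u32 type = 0x00000001;			/* PAGE_ALL */

	/* BAR VMs hold an engref taken in nvc0_bar_ctor(), so their
	 * flushes only need to reach the HUB. */
	if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
		type |= 0x00000004;		/* HUB_ONLY */

	mutex_lock(&nv_subdev(priv)->mutex);
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		/* one flush request per page directory attached to the VM */
		nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
		nv_wr32(priv, 0x100cbc, 0x80000000 | type);
	}
	mutex_unlock(&nv_subdev(priv)->mutex);
}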
@@ -117,8 +117,6 @@ int nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
 		   struct nouveau_vm **);
 void nv04_vmmgr_dtor(struct nouveau_object *);
-void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type);
 /* nouveau_vm.c */
 int nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
 		      u64 mm_offset, u32 block, struct nouveau_vm **);
@@ -51,7 +51,6 @@ nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
 		return ret;
 	nouveau_vm_map(vma, mem);
-	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
 	return 0;
 }
@@ -68,18 +67,13 @@ nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
 		return ret;
 	nouveau_vm_map(vma, mem);
-	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5);
 	return 0;
 }
 static void
 nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
 {
-	struct nvc0_bar_priv *priv = (void *)bar;
-	int i = !(vma->vm == priv->bar[0].vm);
 	nouveau_vm_unmap(vma);
-	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
 	nouveau_vm_put(vma);
 }
@@ -116,6 +110,8 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
+	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
 	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
 				 (pci_resource_len(pdev, 3) >> 12) * 8,
 				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
@@ -150,6 +146,8 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
+	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
 	ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
 	nouveau_vm_ref(NULL, &vm, NULL);
 	if (ret)
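Both BAR VMs are now marked with an engine reference before anything is mapped through them. That is what lets the explicit nvc0_vm_flush_engine() calls be dropped from nvc0_bar_kmap()/nvc0_bar_umap()/nvc0_bar_unmap() above: the generic map/unmap paths end by calling the vmmgr's flush hook, which on nvc0 is now nvc0_vm_flush(). A hedged sketch of the caller-side flow (bar_kmap_example() is hypothetical, and it assumes nouveau_vm_map()/nouveau_vm_unmap() invoke vm->vmm->flush(vm), as the core VM code in this tree does):

/* Illustration only -- not part of the patch. */
static int
bar_kmap_example(struct nouveau_vma *vma, struct nouveau_mem *mem)
{
	nouveau_vm_map(vma, mem);
	/* the core VM code is assumed to call vm->vmm->flush(vm) here,
	 * which lands in nvc0_vm_flush(); because the BAR VM took an
	 * engref in nvc0_bar_ctor(), the flush is PAGE_ALL | HUB_ONLY
	 * and no explicit nvc0_vm_flush_engine() call is required. */
	return 0;
}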
@@ -159,39 +159,37 @@ nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 	}
 }
-void
-nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
+static void
+nvc0_vm_flush(struct nouveau_vm *vm)
 {
-	struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
+	struct nvc0_vmmgr_priv *priv = (void *)vm->vmm;
+	struct nouveau_vm_pgd *vpgd;
+	u32 type;
+	type = 0x00000001; /* PAGE_ALL */
+	if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
+		type |= 0x00000004; /* HUB_ONLY */
+	mutex_lock(&nv_subdev(priv)->mutex);
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
 	/* looks like maybe a "free flush slots" counter, the
 	 * faster you write to 0x100cbc to more it decreases
 	 */
-	mutex_lock(&nv_subdev(priv)->mutex);
-	if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
-		nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
-			 nv_rd32(subdev, 0x100c80), type);
+	if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
+		nv_error(priv, "vm timeout 0: 0x%08x %d\n",
+			 nv_rd32(priv, 0x100c80), type);
 	}
-	nv_wr32(subdev, 0x100cb8, addr >> 8);
-	nv_wr32(subdev, 0x100cbc, 0x80000000 | type);
+	nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
+	nv_wr32(priv, 0x100cbc, 0x80000000 | type);
 	/* wait for flush to be queued? */
-	if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
-		nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
-			 nv_rd32(subdev, 0x100c80), type);
+	if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
+		nv_error(priv, "vm timeout 1: 0x%08x %d\n",
+			 nv_rd32(priv, 0x100c80), type);
 	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-static void
-nvc0_vm_flush(struct nouveau_vm *vm)
-{
-	struct nouveau_vm_pgd *vpgd;
-	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
 	}
+	mutex_unlock(&nv_subdev(priv)->mutex);
 }
 static int