Commit 3ee6f5b5 authored by Ben Skeggs

drm/nouveau: store a pointer to vm in nouveau_cli

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent b12f0ae9
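The gist of the change, as a minimal standalone C sketch using stand-in types (not the real nouveau headers): nouveau_cli now carries its own struct nouveau_vm *vm pointer, call sites read cli->vm (or drm->client.vm) directly, and the constructors still mirror the pointer into base.vm for code that has not been converted yet. Only the field names and the mirroring step come from the patch below; everything else is illustrative.

    /*
     * Minimal sketch of the new access pattern, with stand-in types only.
     */
    #include <stdio.h>

    struct nouveau_vm { int lpg_shift; };              /* stand-in for the real vm type */
    struct nouveau_client { struct nouveau_vm *vm; };  /* base object, still holds a vm */

    struct nouveau_cli {
            struct nouveau_client base;
            struct nouveau_vm *vm;    /* new: direct pointer, marked XXX in the patch */
    };

    int main(void)
    {
            struct nouveau_vm vm = { .lpg_shift = 17 };   /* illustrative value */
            struct nouveau_cli cli = { 0 };

            /* stands in for nouveau_vm_new(..., &cli->vm) in nouveau_drm_open() */
            cli.vm = &vm;
            /* transitional mirroring kept by the patch for unconverted users */
            cli.base.vm = cli.vm;

            /* call sites now test and use cli->vm instead of cli->base.vm */
            if (cli.vm)
                    printf("lpg_shift = %d\n", cli.vm->lpg_shift);
            return 0;
    }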
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -196,8 +196,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	int lpg_shift = 12;
 	int max_size;
 
-	if (drm->client.base.vm)
-		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+	if (drm->client.vm)
+		lpg_shift = drm->client.vm->vmm->lpg_shift;
 	max_size = INT_MAX & ~((1 << lpg_shift) - 1);
 
 	if (size <= 0 || size > max_size) {
@@ -219,9 +219,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
 	nvbo->page_shift = 12;
-	if (drm->client.base.vm) {
+	if (drm->client.vm) {
 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
+			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
 	}
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -929,12 +929,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
 			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
 			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
 	if (ret) {
 		nouveau_vm_put(&old_node->vma[0]);
drivers/gpu/drm/nouveau/nouveau_drm.c

@@ -415,9 +415,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
 	if (device->card_type >= NV_50) {
 		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
-				     0x1000, &drm->client.base.vm);
+				     0x1000, &drm->client.vm);
 		if (ret)
 			goto fail_device;
+
+		drm->client.base.vm = drm->client.vm;
 	}
 
 	ret = nouveau_ttm_init(drm);
@@ -725,11 +727,13 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 
 	if (nv_device(drm->device)->card_type >= NV_50) {
 		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
-				     0x1000, &cli->base.vm);
+				     0x1000, &cli->vm);
 		if (ret) {
 			nouveau_cli_destroy(cli);
 			goto out_suspend;
 		}
+
+		cli->base.vm = cli->vm;
 	}
 
 	fpriv->driver_priv = cli;
drivers/gpu/drm/nouveau/nouveau_drm.h

@@ -65,6 +65,7 @@ enum nouveau_drm_handle {
 
 struct nouveau_cli {
 	struct nouveau_client base;
+	struct nouveau_vm *vm; /*XXX*/
 	struct list_head head;
 	struct mutex mutex;
 	void *abi16;
drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -58,14 +58,14 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_vma *vma;
 	int ret;
 
-	if (!cli->base.vm)
+	if (!cli->vm)
 		return 0;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 	if (ret)
 		return ret;
 
-	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	vma = nouveau_bo_vma_find(nvbo, cli->vm);
 	if (!vma) {
 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 		if (!vma) {
@@ -73,7 +73,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 			goto out;
 		}
 
-		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
+		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
 		if (ret) {
 			kfree(vma);
 			goto out;
@@ -129,14 +129,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_vma *vma;
 	int ret;
 
-	if (!cli->base.vm)
+	if (!cli->vm)
 		return;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 	if (ret)
 		return;
 
-	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	vma = nouveau_bo_vma_find(nvbo, cli->vm);
 	if (vma) {
 		if (--vma->refcount == 0)
 			nouveau_gem_object_unmap(nvbo, vma);
@@ -202,8 +202,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 
 	rep->offset = nvbo->bo.offset;
-	if (cli->base.vm) {
-		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	if (cli->vm) {
+		vma = nouveau_bo_vma_find(nvbo, cli->vm);
 		if (!vma)
 			return -EINVAL;
drivers/gpu/drm/nouveau/nv84_fence.c

@@ -140,7 +140,7 @@ int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
-	struct nouveau_client *client = nouveau_client(fifo);
+	struct nouveau_cli *cli = chan->cli;
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
 	int ret, i;
@@ -156,16 +156,16 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.emit32 = nv84_fence_emit32;
 	fctx->base.sync32 = nv84_fence_sync32;
 
-	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
-		ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
+		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
 					 &fctx->vma_gart);
 	}
 
 	/* map display semaphore buffers into channel's vm */
 	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
 		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
-		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+		ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
 	}
 
 	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);