Commit 0846c728 authored by Dave Airlie

Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-next

- Page flipping fixes, with support for syncing them to vblank (finally...).
- Misc other general fixes

* 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6:
  drm/nouveau: do not map evicted vram buffers in nouveau_bo_vma_add
  drm/nvc0-/gr: shift wrapping bug in nvc0_grctx_generate_r406800
  drm/nouveau/pwr: fix missing mutex unlock in a failure path
  drm/nv40/therm: fix slowing down fan when pstate undefined
  drm/nv11-: synchronise flips to vblank, unless async flip requested
  drm/nvc0-: remove nasty fifo swmthd hack for flip completion method
  drm/nv10-: we no longer need to create nvsw object on user channels
  drm/nouveau: always queue flips relative to kernel channel activity
  drm/nouveau: there is no need to reserve/fence the new fb when flipping
  drm/nouveau: when bailing out of a pushbuf ioctl, do not remove previous fence
  drm/nouveau: allow nouveau_fence_ref() to be a noop
  drm/nvc8/mc: msi rearm is via the nvc0 method
parents ad40f83f d2c7ab32
@@ -256,7 +256,7 @@ nvc0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
......
@@ -494,13 +494,6 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
 u32 mthd = (addr & 0x00003ffc);
 u32 show = stat;
-if (stat & 0x00200000) {
-if (mthd == 0x0054) {
-if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
-show &= ~0x00200000;
-}
-}
 if (stat & 0x00800000) {
 if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
 show &= ~0x00800000;
......
@@ -481,13 +481,6 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
 u32 mthd = (addr & 0x00003ffc);
 u32 show = stat;
-if (stat & 0x00200000) {
-if (mthd == 0x0054) {
-if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
-show &= ~0x00200000;
-}
-}
 if (stat & 0x00800000) {
 if (!nve0_fifo_swmthd(priv, chid, mthd, data))
 show &= ~0x00800000;
......
@@ -1039,7 +1039,7 @@ nvc0_grctx_generate_r406800(struct nvc0_graph_priv *priv)
 } while (!tpcnr[gpc]);
 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
-tpc_set |= 1 << ((gpc * 8) + tpc);
+tpc_set |= 1ULL << ((gpc * 8) + tpc);
 }
 nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
......
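The r406800 change above is the classic shift-wrapping pitfall: 1 << ((gpc * 8) + tpc) is evaluated in 32-bit int arithmetic even though tpc_set is a 64-bit mask, so any bit index above 31 is lost and the shift itself is undefined behaviour. A small standalone C sketch of the difference, as an illustration only (not nouveau code):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int bit = 40;                    /* (gpc * 8) + tpc can exceed 31 */
    uint64_t good = 1ULL << bit;     /* 64-bit shift: really sets bit 40 */

    /* The buggy form "1 << bit" shifts a 32-bit int: shifting by more than
     * 31 is undefined, and on x86 the shift count is masked to 5 bits, so
     * bit 8 gets set instead.  The masked expression below mimics that
     * outcome in a well-defined way. */
    uint64_t bad = (uint64_t)(1u << (bit & 31));

    printf("1ULL << %d = 0x%016" PRIx64 "\n", bit, good);
    printf("1    << %d = 0x%016" PRIx64 " (wrong bit)\n", bit, bad);
    return 0;
}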
@@ -32,6 +32,11 @@ nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
 struct nouveau_subdev *subdev = nv_subdev(ppwr);
 u32 addr;
+/* wait for a free slot in the fifo */
+addr = nv_rd32(ppwr, 0x10a4a0);
+if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
+return -EBUSY;
 /* we currently only support a single process at a time waiting
 * on a synchronous reply, take the PPWR mutex and tell the
 * receive handler what we're waiting for
@@ -42,11 +47,6 @@ nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
 ppwr->recv.process = process;
 }
-/* wait for a free slot in the fifo */
-addr = nv_rd32(ppwr, 0x10a4a0);
-if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
-return -EBUSY;
 /* acquire data segment access */
 do {
 nv_wr32(ppwr, 0x10a580, 0x00000001);
......
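The nouveau_pwr_send() reordering above makes the fallible "wait for a free slot" check run before the PPWR mutex is taken, so the -EBUSY return can no longer leave the mutex held. A generic, hedged sketch of that lock-ordering pattern in plain pthreads; the names and the hw_fifo_has_slot() stub are invented for illustration and are not the nouveau API:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool hw_fifo_has_slot(void)
{
    return true;            /* stand-in for polling a hardware register */
}

/* Do the check that can fail before taking the mutex; every return after
 * pthread_mutex_lock() then funnels through the single unlock below. */
static int send_message(int msg)
{
    if (!hw_fifo_has_slot())
        return -EBUSY;      /* no lock held, safe to bail out */

    pthread_mutex_lock(&lock);
    /* ... queue msg and wait for the reply here ... */
    (void)msg;
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    return send_message(42) ? 1 : 0;
}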
@@ -117,7 +117,8 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
 priv->fan->bios.linear_max_temp) {
 duty = nouveau_therm_update_linear(therm);
 } else {
-duty = priv->cstate;
+if (priv->cstate)
+duty = priv->cstate;
 poll = false;
 }
 immd = false;
......
@@ -298,7 +298,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 else
 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
-if (device->card_type < NV_C0) {
+if (device->card_type < NV_10) {
 init->subchan[0].handle = 0x00000000;
 init->subchan[0].grclass = 0x0000;
 init->subchan[1].handle = NvSw;
......
@@ -98,12 +98,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
 if (tile) {
 spin_lock(&drm->tile.lock);
-if (fence) {
-/* Mark it as pending. */
-tile->fence = fence;
-nouveau_fence_ref(fence);
-}
+tile->fence = nouveau_fence_ref(fence);
 tile->used = false;
 spin_unlock(&drm->tile.lock);
 }
@@ -1462,14 +1457,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
+struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
 struct nouveau_fence *old_fence = NULL;
-if (likely(fence))
-nouveau_fence_ref(fence);
 spin_lock(&nvbo->bo.bdev->fence_lock);
 old_fence = nvbo->bo.sync_obj;
-nvbo->bo.sync_obj = fence;
+nvbo->bo.sync_obj = new_fence;
 spin_unlock(&nvbo->bo.bdev->fence_lock);
 nouveau_fence_unref(&old_fence);
@@ -1552,7 +1545,8 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
 nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+nvbo->page_shift == vma->vm->vmm->spg_shift) {
 if (node->sg)
 nouveau_vm_map_sg_table(vma, 0, size, node);
 else
......
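The nouveau_bo_fence() rework above is a common pattern for swapping a refcounted sync object under a spinlock: take the reference on the new object before locking, exchange the pointers inside the critical section, and drop the old reference only after unlocking (it also leans on the NULL-tolerant nouveau_fence_ref() introduced later in this merge). A hedged userspace sketch of the same idea; the fence type, helpers and locking below are invented for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
    atomic_int refcount;
};

static struct fence *fence_ref(struct fence *f)
{
    if (f)                          /* NULL-tolerant, like nouveau_fence_ref() */
        atomic_fetch_add(&f->refcount, 1);
    return f;
}

static void fence_unref(struct fence **pf)
{
    struct fence *f = *pf;

    *pf = NULL;
    if (f && atomic_fetch_sub(&f->refcount, 1) == 1)
        free(f);
}

static pthread_spinlock_t fence_lock;
static struct fence *bo_sync_obj;

/* Reference the new fence outside the lock, swap pointers inside it, and
 * release the old fence after unlocking - mirroring the reworked
 * nouveau_bo_fence() shape above. */
static void bo_set_fence(struct fence *fence)
{
    struct fence *new_fence = fence_ref(fence);
    struct fence *old_fence;

    pthread_spin_lock(&fence_lock);
    old_fence = bo_sync_obj;
    bo_sync_obj = new_fence;
    pthread_spin_unlock(&fence_lock);

    fence_unref(&old_fence);
}

int main(void)
{
    struct fence *f = calloc(1, sizeof(*f));

    if (!f)
        return 1;
    pthread_spin_init(&fence_lock, PTHREAD_PROCESS_PRIVATE);
    atomic_init(&f->refcount, 1);
    bo_set_fence(f);        /* the "bo" now holds its own reference */
    bo_set_fence(NULL);     /* and drops it again */
    fence_unref(&f);
    printf("done\n");
    return 0;
}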
@@ -346,22 +346,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 OUT_RING(chan, 0x00000000);
-/* allocate software object class (used for fences on <= nv05, and
- * to signal flip completion), bind it to a subchannel.
- */
-if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
+/* allocate software object class (used for fences on <= nv05) */
+if (device->card_type < NV_10) {
 ret = nouveau_object_new(nv_object(client), chan->handle,
-NvSw, nouveau_abi16_swclass(chan->drm),
-NULL, 0, &object);
+NvSw, 0x006e, NULL, 0, &object);
 if (ret)
 return ret;
 swch = (void *)object->parent;
 swch->flip = nouveau_flip_complete;
 swch->flip_data = chan;
-}
-if (device->card_type < NV_C0) {
 ret = RING_SPACE(chan, 2);
 if (ret)
 return ret;
......
@@ -26,7 +26,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/ttm/ttm_execbuf_util.h>
 #include "nouveau_fbcon.h"
 #include "dispnv04/hw.h"
@@ -399,6 +398,11 @@ nouveau_display_create(struct drm_device *dev)
 dev->mode_config.preferred_depth = 24;
 dev->mode_config.prefer_shadow = 1;
+if (nv_device(drm->device)->chipset < 0x11)
+dev->mode_config.async_page_flip = false;
+else
+dev->mode_config.async_page_flip = true;
 drm_kms_helper_poll_init(dev);
 drm_kms_helper_poll_disable(dev);
@@ -555,19 +559,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 goto fail;
 /* Emit the pageflip */
-ret = RING_SPACE(chan, 3);
+ret = RING_SPACE(chan, 2);
 if (ret)
 goto fail;
-if (nv_device(drm->device)->card_type < NV_C0) {
+if (nv_device(drm->device)->card_type < NV_C0)
 BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
-OUT_RING (chan, 0x00000000);
-OUT_RING (chan, 0x00000000);
-} else {
-BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-OUT_RING (chan, 0);
-BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
-}
+else
+BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+OUT_RING (chan, 0x00000000);
 FIRE_RING (chan);
 ret = nouveau_fence_new(chan, false, pfence);
@@ -584,22 +584,16 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 int
 nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-struct drm_pending_vblank_event *event,
-uint32_t page_flip_flags)
+struct drm_pending_vblank_event *event, u32 flags)
 {
+const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
 struct drm_device *dev = crtc->dev;
 struct nouveau_drm *drm = nouveau_drm(dev);
 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
 struct nouveau_page_flip_state *s;
-struct nouveau_channel *chan = NULL;
+struct nouveau_channel *chan = drm->channel;
 struct nouveau_fence *fence;
-struct ttm_validate_buffer resv[2] = {
-{ .bo = &old_bo->bo },
-{ .bo = &new_bo->bo },
-};
-struct ww_acquire_ctx ticket;
-LIST_HEAD(res);
 int ret;
 if (!drm->channel)
@@ -609,26 +603,22 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 if (!s)
 return -ENOMEM;
-/* Choose the channel the flip will be handled in */
-spin_lock(&old_bo->bo.bdev->fence_lock);
-fence = new_bo->bo.sync_obj;
-if (fence)
-chan = fence->channel;
-if (!chan)
-chan = drm->channel;
-spin_unlock(&old_bo->bo.bdev->fence_lock);
+/* synchronise rendering channel with the kernel's channel */
+spin_lock(&new_bo->bo.bdev->fence_lock);
+fence = nouveau_fence_ref(new_bo->bo.sync_obj);
+spin_unlock(&new_bo->bo.bdev->fence_lock);
+ret = nouveau_fence_sync(fence, chan);
+if (ret)
+return ret;
 if (new_bo != old_bo) {
 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
 if (ret)
 goto fail_free;
-list_add(&resv[1].head, &res);
 }
-list_add(&resv[0].head, &res);
 mutex_lock(&chan->cli->mutex);
-ret = ttm_eu_reserve_buffers(&ticket, &res);
+ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
 if (ret)
 goto fail_unpin;
@@ -640,12 +630,29 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 /* Emit a page flip */
 if (nv_device(drm->device)->card_type >= NV_50) {
-ret = nv50_display_flip_next(crtc, fb, chan, 0);
+ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
 if (ret)
 goto fail_unreserve;
 } else {
 struct nv04_display *dispnv04 = nv04_display(dev);
-nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
+int head = nouveau_crtc(crtc)->index;
+if (swap_interval) {
+ret = RING_SPACE(chan, 8);
+if (ret)
+goto fail_unreserve;
+BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+OUT_RING (chan, 0);
+BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+OUT_RING (chan, head);
+BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+OUT_RING (chan, 0);
+BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+OUT_RING (chan, 0);
+}
+nouveau_bo_ref(new_bo, &dispnv04->image[head]);
 }
 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -656,14 +663,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 /* Update the crtc struct and cleanup */
 crtc->fb = fb;
-ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+nouveau_bo_fence(old_bo, fence);
+ttm_bo_unreserve(&old_bo->bo);
 if (old_bo != new_bo)
 nouveau_bo_unpin(old_bo);
 nouveau_fence_unref(&fence);
 return 0;
 fail_unreserve:
-ttm_eu_backoff_reservation(&ticket, &res);
+ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
 mutex_unlock(&chan->cli->mutex);
 if (old_bo != new_bo)
......
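The async_page_flip flag set in nouveau_display_create() above (chipset 0x11 and later) is what the DRM core reports to userspace through DRM_CAP_ASYNC_PAGE_FLIP, and what lets a DRM_MODE_PAGE_FLIP_ASYNC request bypass the vblank-synced path added to nouveau_crtc_page_flip(). A short libdrm client sketch showing how the capability is meant to be consumed; the crtc_id/fb_id values are placeholders, a real client gets them from drmModeGetResources()/drmModeAddFB():

/* build: cc flip.c $(pkg-config --cflags --libs libdrm) */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Request an unsynced flip when the driver advertises it, otherwise fall
 * back to an ordinary vblank-synced flip. */
static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id, void *user_data)
{
    uint64_t async = 0;
    uint32_t flags = DRM_MODE_PAGE_FLIP_EVENT;

    if (drmGetCap(fd, DRM_CAP_ASYNC_PAGE_FLIP, &async) == 0 && async)
        flags |= DRM_MODE_PAGE_FLIP_ASYNC;

    return drmModePageFlip(fd, crtc_id, fb_id, flags, user_data);
}

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);

    if (fd < 0)
        return 1;
    /* dummy ids: expect an error unless real crtc/fb ids are plugged in */
    printf("flip returned %d\n", queue_flip(fd, 0, 0, NULL));
    close(fd);
    return 0;
}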
@@ -51,9 +51,11 @@ enum {
 NvSubCtxSurf2D = 0,
 NvSubSw = 1,
 NvSubImageBlit = 2,
-NvSub2D = 3,
 NvSubGdiRect = 3,
-NvSubCopy = 4,
+NvSub2D = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+NvSubCopy = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
 };
 /* Object handles. */
@@ -194,7 +196,6 @@ WIND_RING(struct nouveau_channel *chan)
 #define NV84_SUBCHAN_UEVENT 0x00000020
 #define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
 #define NV10_SUBCHAN_REF_CNT 0x00000050
-#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
 #define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
 #define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
 #define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
......
@@ -37,6 +37,7 @@
 #include <engine/device.h>
 #include <engine/disp.h>
 #include <engine/fifo.h>
+#include <engine/software.h>
 #include <subdev/vm.h>
@@ -191,6 +192,32 @@ nouveau_accel_init(struct nouveau_drm *drm)
 return;
 }
+ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
+nouveau_abi16_swclass(drm), NULL, 0, &object);
+if (ret == 0) {
+struct nouveau_software_chan *swch = (void *)object->parent;
+ret = RING_SPACE(drm->channel, 2);
+if (ret == 0) {
+if (device->card_type < NV_C0) {
+BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
+OUT_RING (drm->channel, NVDRM_NVSW);
+} else
+if (device->card_type < NV_E0) {
+BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
+OUT_RING (drm->channel, 0x001f0000);
+}
+}
+swch = (void *)object->parent;
+swch->flip = nouveau_flip_complete;
+swch->flip_data = drm->channel;
+}
+if (ret) {
+NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
+nouveau_accel_fini(drm);
+return;
+}
 if (device->card_type < NV_C0) {
 ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
 &drm->notify);
......
@@ -56,6 +56,7 @@ enum nouveau_drm_handle {
 NVDRM_CONTROL = 0xdddddddc,
 NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
 NVDRM_CHAN = 0xcccc0000, /* |= client chid */
+NVDRM_NVSW = 0x55550000,
 };
 struct nouveau_cli {
......
@@ -306,7 +306,8 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
 struct nouveau_fence *
 nouveau_fence_ref(struct nouveau_fence *fence)
 {
-kref_get(&fence->kref);
+if (fence)
+kref_get(&fence->kref);
 return fence;
 }
......
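Making nouveau_fence_ref() tolerate a NULL fence (above) is what lets the callers in nouveau_bo.c and nouveau_gem.c drop their own "if (sync_obj)" guards elsewhere in this merge. A tiny illustrative sketch of the caller-side simplification, using an invented obj type rather than the real fence structures:

#include <stdio.h>

struct obj {
    int refs;
};

/* NULL-tolerant ref, mirroring the nouveau_fence_ref() change above. */
static struct obj *obj_ref(struct obj *o)
{
    if (o)
        o->refs++;
    return o;
}

int main(void)
{
    struct obj real = { .refs = 1 };
    struct obj *maybe = NULL;

    obj_ref(&real);     /* callers no longer need "if (ptr)" guards */
    obj_ref(maybe);     /* safe no-op on NULL */
    printf("refs=%d, maybe=%p\n", real.refs, (void *)maybe);
    return 0;
}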
@@ -106,8 +106,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 if (mapped) {
 spin_lock(&nvbo->bo.bdev->fence_lock);
-if (nvbo->bo.sync_obj)
-fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 spin_unlock(&nvbo->bo.bdev->fence_lock);
 }
@@ -309,7 +308,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
 list_for_each_safe(entry, tmp, list) {
 nvbo = list_entry(entry, struct nouveau_bo, entry);
-nouveau_bo_fence(nvbo, fence);
+if (likely(fence))
+nouveau_bo_fence(nvbo, fence);
 if (unlikely(nvbo->validate_mapped)) {
 ttm_bo_kunmap(&nvbo->kmap);
@@ -438,8 +438,7 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
 int ret = 0;
 spin_lock(&nvbo->bo.bdev->fence_lock);
-if (nvbo->bo.sync_obj)
-fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 spin_unlock(&nvbo->bo.bdev->fence_lock);
 if (fence) {
......
@@ -255,6 +255,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
 OUT_RING(chan, NvCtxSurf2D);
 BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
 OUT_RING(chan, 3);
+if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
+BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
+OUT_RING(chan, 0);
+OUT_RING(chan, 1);
+OUT_RING(chan, 2);
+}
 BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
 OUT_RING(chan, NvGdiRect);
......