Commit 443f9e0b authored by Danilo Krummrich

drm/nouveau: uapi: don't pass NO_PREFETCH flag implicitly

Currently, NO_PREFETCH is passed implicitly through
drm_nouveau_gem_pushbuf_push::length and drm_nouveau_exec_push::va_len.

Since this is a direct representation of how the HW is programmed, it
isn't really future-proof for a uAPI. Hence, fix this up for the new
uAPI and split the va_len field of struct drm_nouveau_exec_push into
32 bits for va_len and 32 bits for flags.

For drm_nouveau_gem_pushbuf_push::length, at least provide
NOUVEAU_GEM_PUSHBUF_NO_PREFETCH to make the flag's position within the
length value explicit.

While at it, fix up nv50_dma_push() as well, such that the caller
doesn't need to encode the NO_PREFETCH flag into the length parameter.
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230823181746.3446-1-dakr@redhat.com
parent c6b9075c
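The change is easiest to see from the userspace side. Below is a hypothetical snippet (not part of this patch) showing how a caller of the classic GEM pushbuf interface requests NO_PREFETCH: before this patch the bit had to be OR'ed into the raw length value as an unnamed constant; afterwards the same bit has a name in the uAPI header. The values and the <drm/nouveau_drm.h> install path are assumptions for illustration only.

/* Hypothetical userspace sketch -- values and header path are assumptions. */
#include <stdint.h>
#include <drm/nouveau_drm.h>   /* provides NOUVEAU_GEM_PUSHBUF_NO_PREFETCH after this patch */

static struct drm_nouveau_gem_pushbuf_push
make_push(uint64_t offset, uint64_t size, int no_prefetch)
{
        struct drm_nouveau_gem_pushbuf_push p = {
                .bo_index = 0,          /* index into the request's buffer list */
                .offset   = offset,
                /*
                 * Previously: p.length = size | (1 << 23); -- a bare magic bit
                 * that only worked because the kernel forwarded length to the
                 * HW unmodified. Now the bit is spelled out:
                 */
                .length   = size | (no_prefetch ? NOUVEAU_GEM_PUSHBUF_NO_PREFETCH : 0),
        };
        return p;
}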
drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -69,16 +69,19 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 }
 
 void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
+nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
+              bool no_prefetch)
 {
         struct nvif_user *user = &chan->drm->client.device.user;
         struct nouveau_bo *pb = chan->push.buffer;
         int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
 
         BUG_ON(chan->dma.ib_free < 1);
+        WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
 
         nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
-        nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+        nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
+                        (no_prefetch ? (1 << 31) : 0));
 
         chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
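For context on the two new constants: the second IB dword written above packs the upper bits of the push buffer address into bits 0..7, the length into bits 8..30, and the NO_PREFETCH flag into bit 31. That is why the maximum length added in nouveau_dma.h is 0x7fffff (23 bits), and why the legacy GEM flag sits at bit 23 of the unshifted length: after the length << 8 it lands exactly on the HW flag bit. A standalone restatement of that encoding (a sketch, not kernel code):

/* Sketch of the IB entry bit layout used by nv50_dma_push() above. */
#include <stdint.h>
#include <stdbool.h>

#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff   /* 23 usable length bits (8..30) */

static uint32_t ib_entry_high_word(uint64_t offset, uint32_t length, bool no_prefetch)
{
        /* bits 0..7: upper offset bits, bits 8..30: length, bit 31: NO_PREFETCH;
         * length must stay <= NV50_DMA_PUSH_MAX_LENGTH or it spills into bit 31. */
        return (uint32_t)(offset >> 32) |
               (length << 8) |
               (no_prefetch ? (1u << 31) : 0u);
}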
drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,7 +31,8 @@
 #include "nouveau_chan.h"
 
 int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
+void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
+                   bool no_prefetch);
 
 /*
  * There's a hw race condition where you can't jump to your PUT offset,
@@ -45,6 +46,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
  */
 #define NOUVEAU_DMA_SKIPS (128 / 4)
 
+/* Maximum push buffer size. */
+#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff
+
 /* Object handles - for stuff that's doesn't use handle == oclass. */
 enum {
         NvDmaFB = 0x80000002,
@@ -89,7 +93,7 @@ FIRE_RING(struct nouveau_channel *chan)
 
         if (chan->dma.ib_max) {
                 nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
-                              (chan->dma.cur - chan->dma.put) << 2);
+                              (chan->dma.cur - chan->dma.put) << 2, false);
         } else {
                 WRITE_PUT(chan->dma.cur);
         }
drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -164,8 +164,10 @@ nouveau_exec_job_run(struct nouveau_job *job)
 	}
 
 	for (i = 0; i < exec_job->push.count; i++) {
-		nv50_dma_push(chan, exec_job->push.s[i].va,
-			      exec_job->push.s[i].va_len);
+		struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
+		bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
+
+		nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
 	}
 
 	ret = nouveau_fence_emit(fence, chan);
@@ -223,7 +225,18 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
 {
 	struct nouveau_exec_job *job;
 	struct nouveau_job_args args = {};
-	int ret;
+	int i, ret;
+
+	for (i = 0; i < __args->push.count; i++) {
+		struct drm_nouveau_exec_push *p = &__args->push.s[i];
+
+		if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
+			NV_PRINTK(err, nouveau_cli(__args->file_priv),
+				  "pushbuf size exceeds limit: 0x%x max 0x%x\n",
+				  p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
+			return -EINVAL;
+		}
+	}
 
 	job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
 	if (!job)
drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -856,9 +856,11 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		for (i = 0; i < req->nr_push; i++) {
 			struct nouveau_vma *vma = (void *)(unsigned long)
 				bo[push[i].bo_index].user_priv;
+			u64 addr = vma->addr + push[i].offset;
+			u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
+			bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
 
-			nv50_dma_push(chan, vma->addr + push[i].offset,
-				      push[i].length);
+			nv50_dma_push(chan, addr, length, no_prefetch);
 		}
 	} else
 	if (drm->client.device.info.chipset >= 0x25) {
include/uapi/drm/nouveau_drm.h
@@ -138,6 +138,7 @@ struct drm_nouveau_gem_pushbuf_push {
 	__u32 pad;
 	__u64 offset;
 	__u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
 };
 
 struct drm_nouveau_gem_pushbuf {
@@ -338,7 +339,12 @@ struct drm_nouveau_exec_push {
 	/**
 	 * @va_len: the length of the push buffer mapping
 	 */
-	__u64 va_len;
+	__u32 va_len;
+	/**
+	 * @flags: the flags for this push buffer mapping
+	 */
+	__u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
 };
 
 /**
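For completeness, a hypothetical userspace snippet filling the reworked exec push entry; the values are placeholders and the <drm/nouveau_drm.h> install path is an assumption. Note that va_len is now 32 bit and, per the new check in nouveau_exec_job_init(), anything above 0x7fffff makes the exec ioctl fail with -EINVAL.

/* Hypothetical sketch -- values are placeholders, not from this patch. */
#include <drm/nouveau_drm.h>

static void fill_exec_push(struct drm_nouveau_exec_push *p,
                           __u64 va, __u32 len, int no_prefetch)
{
        p->va     = va;     /* GPU VA of the push buffer mapping */
        p->va_len = len;    /* must be <= 0x7fffff, else -EINVAL */
        p->flags  = no_prefetch ? DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH : 0;
}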