Commit 0d1aacac authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Embed the ring->private within the struct intel_ring_buffer

We now have more devices using ring->private than not, and they all want
the same structure. Worse, I would like to use a scratch page from
outside of intel_ringbuffer.c and so for convenience would like to reuse
ring->private. Embed the object into struct intel_ring_buffer so that
we can keep the code clean.
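
In practice the conversion is mechanical. As the diff below shows, call
sites that previously bounced through the opaque pointer,

	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;

now read the embedded struct directly:

	u32 scratch_addr = ring->scratch.gtt_offset + 128;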
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent a52690e4
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 	if (WARN_ON(ring->id != RCS))
 		return NULL;

-	obj = ring->private;
+	obj = ring->scratch.obj;
 	if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
 	    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
 		return i915_error_object_create(dev_priv, obj);
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
 #include "i915_trace.h"
 #include "intel_drv.h"

-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-	struct drm_i915_gem_object *obj;
-	volatile u32 *cpu_page;
-	u32 gtt_offset;
-};
-
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
 	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
 static int
 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;

 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;

 	/*
@@ -481,68 +468,43 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 static int
 init_pipe_control(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc;
-	struct drm_i915_gem_object *obj;
 	int ret;

-	if (ring->private)
+	if (ring->scratch.obj)
 		return 0;

-	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
-	if (!pc)
-		return -ENOMEM;
-
-	obj = i915_gem_alloc_object(ring->dev, 4096);
-	if (obj == NULL) {
+	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (ring->scratch.obj == NULL) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = -ENOMEM;
 		goto err;
 	}

-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);

-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
 	if (ret)
 		goto err_unref;

-	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
-	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
-	if (pc->cpu_page == NULL) {
+	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+	if (ring->scratch.cpu_page == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
 	}

 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 ring->name, pc->gtt_offset);
-
-	pc->obj = obj;
-	ring->private = pc;
+			 ring->name, ring->scratch.gtt_offset);
 	return 0;

 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin(ring->scratch.obj);
 err_unref:
-	drm_gem_object_unreference(&obj->base);
+	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
-	kfree(pc);
 	return ret;
 }

-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
-	struct pipe_control *pc = ring->private;
-	struct drm_i915_gem_object *obj;
-
-	obj = pc->obj;
-
-	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-
-	kfree(pc);
-}
-
 static int init_render_ring(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;

-	if (!ring->private)
+	if (ring->scratch.obj == NULL)
 		return;

-	if (HAS_BROKEN_CS_TLB(dev))
-		drm_gem_object_unreference(to_gem_object(ring->private));
-
-	if (INTEL_INFO(dev)->gen >= 5)
-		cleanup_pipe_control(ring);
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_unpin(ring->scratch.obj);
+	}

-	ring->private = NULL;
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
 }

 static void
@@ -742,8 +704,7 @@ do { \
 static int
 pc_render_add_request(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;

 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct pipe_control *pc = ring->private;
-	return pc->cpu_page[0];
+	return ring->scratch.cpu_page[0];
 }

 static void
 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct pipe_control *pc = ring->private;
-	pc->cpu_page[0] = seqno;
+	ring->scratch.cpu_page[0] = seqno;
 }

 static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 	} else {
-		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+		u32 cs_offset = ring->scratch.gtt_offset;

 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			return ret;
 		}

-		ring->private = obj;
+		ring->scratch.obj = obj;
+		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	}

 	return intel_init_ring_buffer(dev, ring);
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@ struct intel_ring_buffer {
 	struct intel_ring_hangcheck hangcheck;

-	void *private;
+	struct {
+		struct drm_i915_gem_object *obj;
+		u32 gtt_offset;
+		volatile u32 *cpu_page;
+	} scratch;
 };

 static inline bool