Commit bf852799 authored by Christian König, committed by Dave Airlie

drm/radeon: make cp variable an array

Replace the cp, cp1 and cp2 members with a single array
of radeon_cp structs.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 5596a9db
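
Note: the change replaces three named command-processor members with one array indexed by ring id, so every site below that hard-coded rdev->cp, rdev->cp1 or rdev->cp2 now selects a ring explicitly. A minimal sketch of the layout change; the numeric index values are assumptions for illustration, since the patch only uses the names:

/* Before (other members elided): */
struct radeon_device {
	struct radeon_cp cp;	/* gfx (and compute on most ASICs) */
	struct radeon_cp cp1;	/* Cayman compute ring 1 */
	struct radeon_cp cp2;	/* Cayman compute ring 2 */
};

/* After: one array indexed by ring id.  The values below are
 * assumed for illustration; only the names appear in the patch. */
enum {
	RADEON_RING_TYPE_GFX_INDEX = 0,
	CAYMAN_RING_TYPE_CP1_INDEX = 1,
	CAYMAN_RING_TYPE_CP2_INDEX = 2,
	RADEON_NUM_RINGS = 3,
};

struct radeon_device {
	struct radeon_cp cp[RADEON_NUM_RINGS];
};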
@@ -1311,7 +1311,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 	/* set to DX10/11 mode */
 	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1362,7 +1362,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 static int evergreen_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 	uint32_t cp_me;
@@ -1428,7 +1428,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -3056,7 +3056,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 static int evergreen_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	/* enable pcie gen2 link */
@@ -3168,7 +3168,7 @@ int evergreen_resume(struct radeon_device *rdev)
 int evergreen_suspend(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
@@ -3251,8 +3251,8 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
...
@@ -49,7 +49,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;
@@ -88,7 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 	if (size == 0xffffffff)
@@ -116,7 +116,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 static void
 set_shaders(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 	/* VS */
@@ -144,7 +144,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 	/* high addr, stride */
@@ -189,7 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr, u32 size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
 	u32 sq_tex_resource_word4, sq_tex_resource_word7;
@@ -230,7 +230,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	/* workaround some hw bugs */
 	if (x2 == 0)
		x1 = 1;
@@ -261,7 +261,7 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
 	radeon_ring_write(cp, DI_PT_RECTLIST);
@@ -286,7 +286,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
 	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
 	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
...
@@ -1049,7 +1049,7 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
 static int cayman_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 	r = radeon_ring_lock(rdev, cp, 7);
@@ -1116,7 +1116,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
 	cayman_cp_enable(rdev, false);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 int cayman_cp_resume(struct radeon_device *rdev)
@@ -1147,7 +1147,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
-	cp = &rdev->cp;
+	cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1181,7 +1181,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	/* ring1 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp1;
+	cp = &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1207,7 +1207,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	/* ring2 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp2;
+	cp = &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1233,15 +1233,15 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	/* start the rings */
 	cayman_cp_start(rdev);
-	rdev->cp.ready = true;
-	rdev->cp1.ready = true;
-	rdev->cp2.ready = true;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+	rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
 	/* this only test cp0 */
-	r = radeon_ring_test(rdev, &rdev->cp);
+	r = radeon_ring_test(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 	if (r) {
-		rdev->cp.ready = false;
-		rdev->cp1.ready = false;
-		rdev->cp2.ready = false;
+		rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 		return r;
 	}
@@ -1343,7 +1343,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
 static int cayman_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	/* enable pcie gen2 link */
@@ -1438,7 +1438,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
 	/* FIXME: we should wait for ring to be empty */
 	cayman_cp_enable(rdev, false);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);
@@ -1455,7 +1455,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	/* This don't do much */
...
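
Cayman is the only ASIC in this patch that touches all three rings: cayman_cp_resume() programs each ring buffer in turn and flips all three ready flags together. A hypothetical helper illustrating the pattern the array makes possible (the patch itself keeps the assignments inline):

static void cayman_set_rings_ready(struct radeon_device *rdev, bool ready)
{
	/* Illustrative only; mirrors the three inline assignments in
	 * cayman_cp_resume(). */
	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = ready;
	rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = ready;
	rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = ready;
}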
@@ -811,7 +811,7 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 	/* We have to make sure that caches are flushed before
	 * CPU might read something from VRAM. */
@@ -849,7 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
		   unsigned num_gpu_pages,
		   struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t cur_pages;
 	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
@@ -934,7 +934,7 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
 void r100_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	r = radeon_ring_lock(rdev, cp, 2);
@@ -1048,7 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned rb_bufsz;
 	unsigned rb_blksz;
 	unsigned max_fetch;
@@ -1162,7 +1162,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 	}
 	/* Disable ring */
 	r100_cp_disable(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 	DRM_INFO("radeon: cp finalized\n");
 }
@@ -1170,7 +1170,7 @@ void r100_cp_disable(struct radeon_device *rdev)
 {
 	/* Disable ring */
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
 	WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -2587,7 +2587,7 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t rdp, wdp;
 	unsigned count, i, j;
@@ -3686,7 +3686,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET0(RADEON_CP_IB_BASE, 1));
 	radeon_ring_write(cp, ib->gpu_addr);
@@ -3778,7 +3778,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
 	/* Shutdown CP we shouldn't need to do that but better be safe than
	 * sorry
	 */
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);
 	/* Save few CRTC registers */
...
@@ -87,7 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
		  unsigned num_gpu_pages,
		  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t size;
 	uint32_t cur_size;
 	int i, num_loops;
...
@@ -175,7 +175,7 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
 void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 	/* Who ever call radeon_fence_emit should call ring_lock and ask
	 * for enough space (today caller are ib schedule and buffer move) */
@@ -208,7 +208,7 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 void r300_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned gb_tile_config;
 	int r;
...
@@ -199,7 +199,7 @@ static void r420_clock_resume(struct radeon_device *rdev)
 static void r420_cp_errata_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	/* RV410 and R420 can lock up if CP DMA to host memory happens
	 * while the 2D engine is busy.
@@ -217,7 +217,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
 static void r420_cp_errata_fini(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	/* Catch the RESYNC we dispatched all the way back,
	 * at the very beginning of the CP init.
...
@@ -2144,7 +2144,7 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 int r600_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	uint32_t cp_me;
@@ -2174,7 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
 int r600_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -2248,7 +2248,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned r
 void r600_cp_fini(struct radeon_device *rdev)
 {
 	r600_cp_stop(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
@@ -2271,7 +2271,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	uint32_t scratch;
 	uint32_t tmp = 0;
-	unsigned i;
+	unsigned i, ridx = radeon_ring_index(rdev, cp);
 	int r;
 	r = radeon_scratch_get(rdev, &scratch);
@@ -2282,7 +2282,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 	WREG32(scratch, 0xCAFEDEAD);
 	r = radeon_ring_lock(rdev, cp, 3);
 	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring %p (%d).\n", cp, r);
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
 		radeon_scratch_free(rdev, scratch);
 		return r;
 	}
@@ -2297,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test on %p succeeded in %d usecs\n", cp, i);
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
 	} else {
-		DRM_ERROR("radeon: ring %p test failed (scratch(0x%04X)=0x%08X)\n",
-			  cp, scratch, tmp);
+		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ridx, scratch, tmp);
 		r = -EINVAL;
 	}
 	radeon_scratch_free(rdev, scratch);
@@ -2310,7 +2310,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
@@ -2420,7 +2420,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 int r600_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	/* enable pcie gen2 link */
@@ -2534,7 +2534,7 @@ int r600_suspend(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
@@ -2609,8 +2609,8 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -2677,7 +2677,7 @@ void r600_fini(struct radeon_device *rdev)
  */
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 	/* FIXME: implement */
 	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -3518,7 +3518,7 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned count, i, j;
 	radeon_ring_free_size(rdev, cp);
...
@@ -50,7 +50,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;
@@ -104,7 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 	if (size == 0xffffffff)
@@ -123,7 +123,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 static void
 set_shaders(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 	u32 sq_pgm_resources;
@@ -170,7 +170,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2;
 	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -207,7 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr, u32 size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 	if (h < 1)
@@ -246,7 +246,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
 	radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
 	radeon_ring_write(cp, (x1 << 0) | (y1 << 16));
@@ -267,7 +267,7 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	radeon_ring_write(cp, DI_PT_RECTLIST);
@@ -292,7 +292,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
 	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
 	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -687,7 +687,7 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
 int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	int ring_size;
 	int num_loops = 0;
@@ -727,7 +727,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
 	if (fence)
 		r = radeon_fence_emit(rdev, fence);
-	radeon_ring_unlock_commit(rdev, &rdev->cp);
+	radeon_ring_unlock_commit(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 void r600_kms_blit_copy(struct radeon_device *rdev,
...
@@ -547,6 +547,7 @@ struct r600_ih {
 	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr;
+	unsigned		rptr_offs;
 	unsigned		wptr;
 	unsigned		wptr_old;
 	unsigned		ring_size;
@@ -598,6 +599,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_test(struct radeon_device *rdev);
 extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp);
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp);
 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
@@ -1284,9 +1286,7 @@ struct radeon_device {
 	rwlock_t			fence_lock;
 	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
 	struct radeon_semaphore_driver	semaphore_drv;
-	struct radeon_cp		cp;
-	struct radeon_cp		cp1;
-	struct radeon_cp		cp2;
+	struct radeon_cp		cp[RADEON_NUM_RINGS];
 	struct radeon_ib_pool		ib_pool;
 	struct radeon_irq		irq;
 	struct radeon_asic		*asic;
...
@@ -718,7 +718,8 @@ int radeon_device_init(struct radeon_device *rdev,
	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
-	mutex_init(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		mutex_init(&rdev->cp[i].mutex);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
 	if (rdev->family >= CHIP_R600)
 		spin_lock_init(&rdev->ih.lock);
...
@@ -84,7 +84,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 		return 0;
 	}
 	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
-	if (!rdev->cp.ready)
+	if (!rdev->cp[fence->ring].ready)
 		/* FIXME: cp is not running assume everythings is done right
 		 * away
 		 */
@@ -269,7 +269,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
		 * if we experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv[fence->ring].last_seq &&
-		    radeon_gpu_is_lockup(rdev, &rdev->cp)) {
+		    radeon_gpu_is_lockup(rdev, &rdev->cp[fence->ring])) {
			/* good news we believe it's a lockup */
			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			       fence->seq, seq);
...
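
Fences already carry the ring they were emitted on, so both the ready check in radeon_fence_emit() and the lockup test in radeon_fence_wait() index the array through fence->ring instead of assuming the GFX ring. A trivial sketch of the lookup, relying only on the fence->ring field used throughout this patch:

static struct radeon_cp *radeon_fence_cp(struct radeon_device *rdev,
					 struct radeon_fence *fence)
{
	/* Hypothetical accessor; the patch open-codes this indexing. */
	return &rdev->cp[fence->ring];
}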
@@ -152,6 +152,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
 	struct ttm_mem_type_manager *man;
+	unsigned i;
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
@@ -161,7 +162,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
 	args->vram_visible -= radeon_fbdev_total_size(rdev);
 	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
-	args->gart_size -= rdev->cp.ring_size;
+	for(i = 0; i < RADEON_NUM_RINGS; ++i)
+		args->gart_size -= rdev->cp[i].ring_size;
 	return 0;
 }
...
@@ -252,8 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 	mutex_lock(&rdev->ddev->struct_mutex);
 	mutex_lock(&rdev->vram_mutex);
-	if (rdev->cp.ring_obj)
-		mutex_lock(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (rdev->cp[i].ring_obj)
+			mutex_lock(&rdev->cp[i].mutex);
+	}
 	/* gui idle int has issues on older chips it seems */
 	if (rdev->family >= CHIP_R600) {
@@ -269,11 +271,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 			radeon_irq_set(rdev);
 		}
 	} else {
-		struct radeon_cp *cp = &rdev->cp;
+		struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 		if (cp->ready) {
 			struct radeon_fence *fence;
 			radeon_ring_alloc(rdev, cp, 64);
-			radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+			radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, cp));
 			radeon_fence_emit(rdev, fence);
 			radeon_ring_commit(rdev, cp);
 			radeon_fence_wait(fence, false);
@@ -309,8 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
-	if (rdev->cp.ring_obj)
-		mutex_unlock(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (rdev->cp[i].ring_obj)
+			mutex_unlock(&rdev->cp[i].mutex);
+	}
 	mutex_unlock(&rdev->vram_mutex);
 	mutex_unlock(&rdev->ddev->struct_mutex);
 }
...
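
radeon_pm_set_clocks() now takes every initialized ring's mutex before reclocking and releases them all afterwards; rings that were never set up (ring_obj == NULL) are skipped. Hypothetical helpers capturing that pattern (the patch open-codes both loops):

static void radeon_pm_lock_rings(struct radeon_device *rdev)
{
	int i;

	/* Only rings that were actually initialized carry a usable
	 * mutex; mirrors the open-coded loop in radeon_pm_set_clocks(). */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (rdev->cp[i].ring_obj)
			mutex_lock(&rdev->cp[i].mutex);
	}
}

static void radeon_pm_unlock_rings(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (rdev->cp[i].ring_obj)
			mutex_unlock(&rdev->cp[i].mutex);
	}
}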
@@ -178,7 +178,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 	int r = 0;
 	if (!ib->length_dw || !cp->ready) {
@@ -284,6 +284,21 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 /*
  * Ring.
  */
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp)
+{
+	/* r1xx-r5xx only has CP ring */
+	if (rdev->family < CHIP_R600)
+		return RADEON_RING_TYPE_GFX_INDEX;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX])
+			return CAYMAN_RING_TYPE_CP1_INDEX;
+		else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX])
+			return CAYMAN_RING_TYPE_CP2_INDEX;
+	}
+	return RADEON_RING_TYPE_GFX_INDEX;
+}
+
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	if (rdev->wb.enabled)
@@ -312,7 +327,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned
 		if (ndw < cp->ring_free_dw) {
 			break;
 		}
-		r = radeon_fence_wait_next(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp));
 		if (r)
 			return r;
 	}
...
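
The new radeon_ring_index() maps a struct radeon_cp pointer back to its ring id by comparing it against the Cayman array slots, falling back to the GFX index (which also covers pre-R600 parts with a single CP ring). Callers that used to hard-code RADEON_RING_TYPE_GFX_INDEX, such as radeon_ring_alloc() above and radeon_fence_create() in radeon_pm.c, now pass the computed index. A usage sketch mirroring the radeon_pm.c hunk (error handling elided):

static void radeon_emit_and_wait_fence(struct radeon_device *rdev,
				       struct radeon_cp *cp)
{
	struct radeon_fence *fence;

	/* Sketch only; return values should be checked in real code. */
	radeon_ring_alloc(rdev, cp, 64);
	radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, cp));
	radeon_fence_emit(rdev, fence);
	radeon_ring_commit(rdev, cp);
	radeon_fence_wait(fence, false);
}

Since cp always points into the rdev->cp array, pointer arithmetic (cp - rdev->cp) would give the same answer; the explicit comparisons keep the pre-R600 single-ring case and the default obvious.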
@@ -121,13 +121,13 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
				  struct radeon_semaphore *semaphore)
 {
-	radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, false);
+	radeon_semaphore_ring_emit(rdev, &rdev->cp[ring], semaphore, false);
 }
 void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
				struct radeon_semaphore *semaphore)
 {
-	radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, true);
+	radeon_semaphore_ring_emit(rdev, &rdev->cp[ring], semaphore, true);
 }
 void radeon_semaphore_free(struct radeon_device *rdev,
...
@@ -43,7 +43,8 @@ void radeon_test_moves(struct radeon_device *rdev)
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
 	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
-	n -= rdev->cp.ring_size;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		n -= rdev->cp[i].ring_size;
 	if (rdev->wb.wb_obj)
 		n -= RADEON_GPU_PAGE_SIZE;
 	if (rdev->ih.ring_obj)
...
@@ -188,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->rdev->cp.ready == false)
+		if (rbo->rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready == false)
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 		else
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -255,7 +255,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
 		return -EINVAL;
 	}
-	if (!rdev->cp.ready) {
+	if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready) {
 		DRM_ERROR("Trying to move memory with CP turned off.\n");
 		return -EINVAL;
 	}
@@ -380,7 +380,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 		radeon_move_null(bo, new_mem);
 		return 0;
 	}
-	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+	if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
 		/* use memcpy */
 		goto memcpy;
 	}
...
@@ -55,7 +55,7 @@ void rv515_debugfs(struct radeon_device *rdev)
 void rv515_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	r = radeon_ring_lock(rdev, cp, 64);
...
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 void r700_cp_fini(struct radeon_device *rdev)
 {
 	r700_cp_stop(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 /*
@@ -1043,7 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
 static int rv770_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	/* enable pcie gen2 link */
@@ -1144,7 +1144,7 @@ int rv770_suspend(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
@@ -1217,8 +1217,8 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
...