Commit 416a2bd2 authored by Alex Deucher, committed by Dave Airlie

drm/radeon: fixup tiling group size and backendmap on r6xx-r9xx (v4)

Tiling group size is always 256 bits on r6xx/r7xx/r8xx/r9xx. Also fix and
simplify the render backend map. This now properly sets up the backend map
on r6xx-r9xx, which should improve 3D performance.

Vadim also benchmarked:
Some benchmarks on juniper (5750), fullscreen 1920x1080,
first result - kernel 3.4.0+ (fb21affa), second - with these patches:

Lightsmark:   91 fps => 123 fps    +35%
Doom3:        74 fps => 101 fps    +36%
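
As a worked illustration (assuming no RBs are fused off): on a 4-pipe, 4-RB part
like Juniper, the new r6xx_remap_render_backend() walks the enabled backends from
highest to lowest and hands one to each rendering pipe, producing
GB_BACKEND_MAP = 0x3210, i.e. every pipe gets its own RB. The table the old code
carried hard-coded 0x00002200 for Juniper, which reads as pipes 0/1 sharing RB 0
and pipes 2/3 sharing RB 2, leaving half the backends idle; that is consistent
with the gains above.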
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 95c4b23e
@@ -1558,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/*
* Core functions
*/
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask = 0;
u32 enabled_backends_count = 0;
u32 cur_pipe;
u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
u32 cur_backend = 0;
u32 i;
bool force_no_swizzle;
if (num_tile_pipes > EVERGREEN_MAX_PIPES)
num_tile_pipes = EVERGREEN_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > EVERGREEN_MAX_BACKENDS)
num_backends = EVERGREEN_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
force_no_swizzle = false;
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_JUNIPER:
case CHIP_BARTS:
default:
force_no_swizzle = true;
break;
}
if (force_no_swizzle) {
bool last_backend_enabled = false;
force_no_swizzle = false;
for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
if (((enabled_backends_mask >> i) & 1) == 1) {
if (last_backend_enabled)
force_no_swizzle = true;
last_backend_enabled = true;
} else
last_backend_enabled = false;
}
}
switch (num_tile_pipes) {
case 1:
case 3:
case 5:
case 7:
DRM_ERROR("odd number of pipes!\n");
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
swizzle_pipe[3] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
}
break;
}
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
}
return backend_map;
}
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 gb_backend_map;
u32 grbm_gfx_index;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 sq_config;
@@ -1729,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
u32 sq_stack_resource_mgmt_3;
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
u32 disabled_rb_mask;
int i, j, num_shader_engines, ps_thread_count;
switch (rdev->family) {
@@ -1753,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_JUNIPER:
rdev->config.evergreen.num_ses = 1;
@@ -1774,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_REDWOOD:
rdev->config.evergreen.num_ses = 1;
@@ -1795,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CEDAR:
default:
@@ -1817,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PALM:
rdev->config.evergreen.num_ses = 1;
@@ -1838,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
@@ -1865,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
@@ -1886,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
@@ -1907,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TURKS:
rdev->config.evergreen.num_ses = 1;
@@ -1928,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
@@ -1949,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -1965,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
evergreen_fix_pci_max_read_req_size(rdev);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
& EVERGREEN_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
& EVERGREEN_MAX_SIMDS_MASK);
cc_rb_backend_disable =
BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
& EVERGREEN_MAX_BACKENDS_MASK);
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
@@ -1987,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
switch (rdev->config.evergreen.max_tile_pipes) {
case 1:
default:
gb_addr_config |= NUM_PIPES(0);
break;
case 2:
gb_addr_config |= NUM_PIPES(1);
break;
case 4:
gb_addr_config |= NUM_PIPES(2);
break;
case 8:
gb_addr_config |= NUM_PIPES(3);
break;
}
gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
gb_addr_config |= ROW_SIZE(2);
else
gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
if (rdev->ddev->pdev->device == 0x689e) {
u32 efuse_straps_4;
u32 efuse_straps_3;
u8 efuse_box_bit_131_124;
WREG32(RCU_IND_INDEX, 0x204);
efuse_straps_4 = RREG32(RCU_IND_DATA);
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
switch(efuse_box_bit_131_124) {
case 0x00:
gb_backend_map = 0x76543210;
break;
case 0x55:
gb_backend_map = 0x77553311;
break;
case 0x56:
gb_backend_map = 0x77553300;
break;
case 0x59:
gb_backend_map = 0x77552211;
break;
case 0x66:
gb_backend_map = 0x77443300;
break;
case 0x99:
gb_backend_map = 0x66552211;
break;
case 0x5a:
gb_backend_map = 0x77552200;
break;
case 0xaa:
gb_backend_map = 0x66442200;
break;
case 0x95:
gb_backend_map = 0x66553311;
break;
default:
DRM_ERROR("bad backend map, using default\n");
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
} else if (rdev->ddev->pdev->device == 0x68b9) {
u32 efuse_straps_3;
u8 efuse_box_bit_127_124;
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
switch(efuse_box_bit_127_124) {
case 0x0:
gb_backend_map = 0x00003210;
break;
case 0x5:
case 0x6:
case 0x9:
case 0xa:
gb_backend_map = 0x00003311;
break;
default:
DRM_ERROR("bad backend map, using default\n");
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
} else {
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_BARTS:
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
gb_backend_map = 0x00002200;
break;
default:
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
}
}
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -2147,42 +1863,48 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
else
rdev->config.evergreen.tile_config |= 0 << 4;
}
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
rdev->config.evergreen.tile_config |= 0 << 8;
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
rdev->config.evergreen.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
u32 rb = cc_rb_backend_disable | (0xf0 << 16);
u32 sp = cc_gc_shader_pipe_config;
u32 gfx = grbm_gfx_index | SE_INDEX(i);
if (i == num_shader_engines) {
rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
}
WREG32(GRBM_GFX_INDEX, gfx);
WREG32(RLC_GFX_INDEX, gfx);
WREG32(CC_RB_BACKEND_DISABLE, rb);
WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
}
grbm_gfx_index = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES;
WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
WREG32(RLC_GFX_INDEX, grbm_gfx_index);
num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
u32 efuse_straps_4;
u32 efuse_straps_3;
WREG32(RCU_IND_INDEX, 0x204);
efuse_straps_4 = RREG32(RCU_IND_DATA);
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
tmp = (((efuse_straps_4 & 0xf) << 4) |
((efuse_straps_3 & 0xf0000000) >> 28));
} else {
tmp = 0;
for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
u32 rb_disable_bitmap;
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
tmp <<= 4;
tmp |= rb_disable_bitmap;
}
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
...
@@ -37,6 +37,15 @@
#define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF
#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
/* Registers */
#define RCU_IND_INDEX 0x100
@@ -54,6 +63,7 @@
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
#define NUM_PIPES_MASK 0x0000000f
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12)
...
@@ -417,215 +417,17 @@ int ni_init_microcode(struct radeon_device *rdev)
/*
* Core functions
*/
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends_per_asic,
u32 *backend_disable_mask_per_asic,
u32 num_shader_engines)
{
u32 backend_map = 0;
u32 enabled_backends_mask = 0;
u32 enabled_backends_count = 0;
u32 num_backends_per_se;
u32 cur_pipe;
u32 swizzle_pipe[CAYMAN_MAX_PIPES];
u32 cur_backend = 0;
u32 i;
bool force_no_swizzle;
/* force legal values */
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
num_tile_pipes = rdev->config.cayman.max_tile_pipes;
if (num_shader_engines < 1)
num_shader_engines = 1;
if (num_shader_engines > rdev->config.cayman.max_shader_engines)
num_shader_engines = rdev->config.cayman.max_shader_engines;
if (num_backends_per_asic < num_shader_engines)
num_backends_per_asic = num_shader_engines;
if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
/* make sure we have the same number of backends per se */
num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
/* set up the number of backends per se */
num_backends_per_se = num_backends_per_asic / num_shader_engines;
if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
num_backends_per_se = rdev->config.cayman.max_backends_per_se;
num_backends_per_asic = num_backends_per_se * num_shader_engines;
}
/* create enable mask and count for enabled backends */
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends_per_asic)
break;
}
/* force the backends mask to match the current number of backends */
if (enabled_backends_count != num_backends_per_asic) {
u32 this_backend_enabled;
u32 shader_engine;
u32 backend_per_se;
enabled_backends_mask = 0;
enabled_backends_count = 0;
*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
/* calc the current se */
shader_engine = i / rdev->config.cayman.max_backends_per_se;
/* calc the backend per se */
backend_per_se = i % rdev->config.cayman.max_backends_per_se;
/* default to not enabled */
this_backend_enabled = 0;
if ((shader_engine < num_shader_engines) &&
(backend_per_se < num_backends_per_se))
this_backend_enabled = 1;
if (this_backend_enabled) {
enabled_backends_mask |= (1 << i);
*backend_disable_mask_per_asic &= ~(1 << i);
++enabled_backends_count;
}
}
}
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
switch (rdev->family) {
case CHIP_CAYMAN:
case CHIP_ARUBA:
force_no_swizzle = true;
break;
default:
force_no_swizzle = false;
break;
}
if (force_no_swizzle) {
bool last_backend_enabled = false;
force_no_swizzle = false;
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
if (((enabled_backends_mask >> i) & 1) == 1) {
if (last_backend_enabled)
force_no_swizzle = true;
last_backend_enabled = true;
} else
last_backend_enabled = false;
}
}
switch (num_tile_pipes) {
case 1:
case 3:
case 5:
case 7:
DRM_ERROR("odd number of pipes!\n");
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
swizzle_pipe[3] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
}
break;
}
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
}
return backend_map;
}
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
u32 disable_mask_per_se,
u32 max_disable_mask_per_se,
u32 num_shader_engines)
{
u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
if (num_shader_engines == 1)
return disable_mask_per_asic;
else if (num_shader_engines == 2)
return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
else
return 0xffffffff;
}
static void cayman_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 gb_backend_map;
u32 cgts_tcc_disable;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 gc_user_shader_pipe_config;
u32 gc_user_rb_backend_disable;
u32 cgts_user_tcc_disable;
u32 cgts_sm_ctrl_reg;
u32 hdp_host_path_cntl;
u32 tmp;
u32 disabled_rb_mask;
int i, j;
switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.sc_prim_fifo_size = 0x100;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARUBA:
default:
@@ -687,6 +490,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.sc_prim_fifo_size = 0x40;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -706,39 +510,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
cgts_tcc_disable = 0xffff0000;
for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
cgts_tcc_disable &= ~(1 << (16 + i));
gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
rdev->config.cayman.backend_disable_mask_per_asic =
cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
rdev->config.cayman.num_shader_engines);
rdev->config.cayman.backend_map =
cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
rdev->config.cayman.num_backends_per_se *
rdev->config.cayman.num_shader_engines,
&rdev->config.cayman.backend_disable_mask_per_asic,
rdev->config.cayman.num_shader_engines);
tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
rdev->config.cayman.mem_max_burst_length_bytes = 512;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +519,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.num_gpus = 1;
rdev->config.cayman.multi_gpu_tile_size = 64;
//gb_addr_config = 0x02011003
#if 0
gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
gb_addr_config = 0;
switch (rdev->config.cayman.num_tile_pipes) {
case 1:
default:
gb_addr_config |= NUM_PIPES(0);
break;
case 2:
gb_addr_config |= NUM_PIPES(1);
break;
case 4:
gb_addr_config |= NUM_PIPES(2);
break;
case 8:
gb_addr_config |= NUM_PIPES(3);
break;
}
tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
switch (rdev->config.cayman.num_gpus) {
case 1:
default:
gb_addr_config |= NUM_GPUS(0);
break;
case 2:
gb_addr_config |= NUM_GPUS(1);
break;
case 4:
gb_addr_config |= NUM_GPUS(2);
break;
}
switch (rdev->config.cayman.multi_gpu_tile_size) {
case 16:
gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
break;
case 32:
default:
gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
break;
case 64:
gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
break;
case 128:
gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
break;
}
switch (rdev->config.cayman.mem_row_size_in_kb) {
case 1:
default:
gb_addr_config |= ROW_SIZE(0);
break;
case 2:
gb_addr_config |= ROW_SIZE(1);
break;
case 4:
gb_addr_config |= ROW_SIZE(2);
break;
}
#endif
tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
rdev->config.cayman.num_tile_pipes = (1 << tmp);
tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +532,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
//gb_backend_map = 0x76541032;
#if 0
gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
gb_backend_map =
cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
rdev->config.cayman.num_backends_per_se *
rdev->config.cayman.num_shader_engines,
&rdev->config.cayman.backend_disable_mask_per_asic,
rdev->config.cayman.num_shader_engines);
#endif
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -877,25 +571,38 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
rdev->config.cayman.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
tmp = 0;
for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
u32 rb_disable_bitmap;
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
tmp <<= 4;
tmp |= rb_disable_bitmap;
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
/* primary versions */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
rdev->config.cayman.max_backends_per_se *
rdev->config.cayman.max_shader_engines,
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
WREG32(GB_BACKEND_MAP, tmp);
cgts_tcc_disable = 0xffff0000;
for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
cgts_tcc_disable &= ~(1 << (16 + i));
WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
/* user versions */
WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
...
@@ -41,6 +41,9 @@
#define CAYMAN_MAX_TCC 16
#define CAYMAN_MAX_TCC_MASK 0xFF
#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
@@ -148,6 +151,8 @@
#define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
#define RLC_GFX_INDEX 0x3FC4
#define CONFIG_MEMSIZE 0x5428
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
@@ -212,6 +217,12 @@
#define SOFT_RESET_VGT (1 << 14)
#define SOFT_RESET_IA (1 << 15)
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
#define INSTANCE_BROADCAST_WRITES (1 << 30)
#define SE_BROADCAST_WRITES (1 << 31)
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
...
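
The GRBM_GFX_INDEX bits defined above are what the reworked evergreen_gpu_init()
and cayman_gpu_init() use to probe each shader engine's render backends. A
minimal sketch of that pattern follows; gather_disabled_rbs is a hypothetical
helper name (the patch open-codes the loop), and WREG32/RREG32 stand in for the
driver's MMIO accessors:

/* Select one SE at a time, read its RB-disable field, and pack the
 * per-SE nibbles into a single chip-wide disabled-RB mask. */
static u32 gather_disabled_rbs(struct radeon_device *rdev, int num_ses)
{
	u32 tmp = 0;
	int i;

	for (i = num_ses - 1; i >= 0; i--) {
		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		/* bits 23:16 of CC_RB_BACKEND_DISABLE hold the per-SE bitmap */
		tmp = (tmp << 4) | ((RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16);
	}
	/* restore broadcast so later register writes reach every SE */
	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	return tmp;
}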
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
return r600_gpu_soft_reset(rdev);
}
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
u32 enabled_backends_count;
u32 cur_pipe;
u32 swizzle_pipe[R6XX_MAX_PIPES];
u32 cur_backend;
u32 i;
if (num_tile_pipes > R6XX_MAX_PIPES)
num_tile_pipes = R6XX_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > R6XX_MAX_BACKENDS)
num_backends = R6XX_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
enabled_backends_mask = 0;
enabled_backends_count = 0;
for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
swizzle_pipe[0] = 0;
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 3:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
break;
case 4:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
break;
case 5:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
break;
case 6:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
break;
case 7:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
break;
case 8:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
break;
}
cur_backend = 0;
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
}
return backend_map;
}
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
u32 total_max_rb_num,
u32 disabled_rb_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
u32 pipe_rb_ratio, pipe_rb_remain;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
/* mask out the RBs that don't exist on that asic */
disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
BUG_ON(rendering_pipe_num < req_rb_num);
pipe_rb_ratio = rendering_pipe_num / req_rb_num;
pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
if (rdev->family <= CHIP_RV740) {
/* r6xx/r7xx */
rb_num_width = 2;
} else {
/* eg+ */
rb_num_width = 4;
}
for (i = 0; i < max_rb_num; i++) {
if (!(mask & disabled_rb_mask)) {
for (j = 0; j < pipe_rb_ratio; j++) {
data <<= rb_num_width;
data |= max_rb_num - i - 1;
}
if (pipe_rb_remain) {
data <<= rb_num_width;
data |= max_rb_num - i - 1;
pipe_rb_remain--;
}
}
mask >>= 1;
}
return data;
}
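
A quick way to sanity-check the remap is to mirror it in user space. The sketch
below is illustrative only (plain C, no rdev; rb_num_width is passed in instead
of being derived from the chip family) and prints 0x3210 for a hypothetical
4-pipe, 4-RB Evergreen-class part with nothing fused off:

#include <stdio.h>

static unsigned count_bits(unsigned v)
{
	unsigned n = 0;
	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

/* Mirrors r6xx_remap_render_backend() above, minus the BUG_ON. */
static unsigned remap(unsigned tiling_pipe_num, unsigned max_rb_num,
		      unsigned total_max_rb_num, unsigned disabled_rb_mask,
		      unsigned rb_num_width)
{
	unsigned rendering_pipe_num, req_rb_num, pipe_rb_ratio, pipe_rb_remain;
	unsigned data = 0, mask = 1u << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on this asic */
	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - count_bits(disabled_rb_mask);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}
	return data;
}

int main(void)
{
	/* NUM_PIPES field = 2 (4 pipes), 4 RBs of a max 8, none disabled,
	 * 4-bit map fields as on eg+: expect one distinct RB per pipe. */
	printf("0x%x\n", remap(2, 4, 8, 0, 4)); /* prints 0x3210 */
	return 0;
}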
int r600_count_pipe_bits(uint32_t val)
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt = 0;
u32 sq_stack_resource_mgmt_1 = 0;
u32 sq_stack_resource_mgmt_2 = 0;
u32 disabled_rb_mask;
/* FIXME: implement */
rdev->config.r600.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_R600:
rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.r600.tiling_group_size = 512;
else
rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= BANK_SWAPS(1);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
(R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
tmp = R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
if (tmp < rdev->config.r600.max_backends) {
rdev->config.r600.max_backends = tmp;
}
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
tmp = R6XX_MAX_PIPES -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
if (tmp < rdev->config.r600.max_pipes) {
rdev->config.r600.max_pipes = tmp;
}
tmp = R6XX_MAX_SIMDS -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
if (tmp < rdev->config.r600.max_simds) {
rdev->config.r600.max_simds = tmp;
}
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
R6XX_MAX_BACKENDS, disabled_rb_mask);
tiling_config |= tmp << 16;
rdev->config.r600.backend_map = tmp;
rdev->config.r600.tile_config = tiling_config;
rdev->config.r600.backend_map = backend_map;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
/* Setup pipes */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
...
@@ -219,6 +219,8 @@
#define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
...
@@ -1845,6 +1845,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
u32 total_max_rb_num,
u32 enabled_rb_mask);
/*
* evergreen functions used by radeon_encoder.c
...
@@ -365,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
/*
* Core functions
*/
static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
u32 enabled_backends_count;
u32 cur_pipe;
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > R7XX_MAX_BACKENDS)
num_backends = R7XX_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
enabled_backends_mask = 0;
enabled_backends_count = 0;
for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
swizzle_pipe[0] = 0;
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 3:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
}
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 3;
swizzle_pipe[3] = 1;
}
break;
case 5:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
}
break;
case 7:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 7;
swizzle_pipe[7] = 5;
}
break;
}
cur_backend = 0;
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
}
return backend_map;
}
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -554,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt;
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
u32 db_debug4;
u32 db_debug4, tmp;
u32 inactive_pipes, shader_pipe_config;
u32 disabled_rb_mask;
unsigned active_number;
/* setup chip specs */
rdev->config.rv770.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_RV770:
rdev->config.rv770.max_pipes = 4;
@@ -672,23 +501,60 @@ static void rv770_gpu_init(struct radeon_device *rdev)
/* setup tiling, simd, pipe config */
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
if (!(inactive_pipes & tmp)) {
active_number++;
}
tmp <<= 1;
}
if (active_number == 1) {
WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
} else {
WREG32(SPI_CONFIG_CNTL, 0);
}
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
if (tmp < rdev->config.rv770.max_backends) {
rdev->config.rv770.max_backends = tmp;
}
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
if (tmp < rdev->config.rv770.max_pipes) {
rdev->config.rv770.max_pipes = tmp;
}
tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
if (tmp < rdev->config.rv770.max_simds) {
rdev->config.rv770.max_simds = tmp;
}
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
default:
gb_tiling_config |= PIPE_TILING(0);
gb_tiling_config = PIPE_TILING(0);
break;
case 2:
gb_tiling_config |= PIPE_TILING(1);
gb_tiling_config = PIPE_TILING(1);
break;
case 4:
gb_tiling_config |= PIPE_TILING(2);
gb_tiling_config = PIPE_TILING(2);
break;
case 8:
gb_tiling_config |= PIPE_TILING(3);
gb_tiling_config = PIPE_TILING(3);
break;
}
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
R7XX_MAX_BACKENDS, disabled_rb_mask);
gb_tiling_config |= tmp << 16;
rdev->config.rv770.backend_map = tmp;
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else {
@@ -699,10 +565,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.rv770.tiling_group_size = 512;
else
rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -714,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
gb_tiling_config |= BANK_SWAPS(1);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
if (rdev->family == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(rdev,
rdev->config.rv770.max_tile_pipes,
(R7XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
rdev->config.rv770.tile_config = gb_tiling_config;
rdev->config.rv770.backend_map = backend_map;
gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -815,8 +649,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(VGT_NUM_INSTANCES, 1);
WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
WREG32(CP_PERFMON_CNTL, 0);
...
@@ -106,10 +106,13 @@
#define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
#define INACTIVE_QD_PIPES_SHIFT 8
#define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000
...