Commit a8061227 authored by Dave Airlie

Merge branch 'drm-fixes-3.17' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Just a few more radeon fixes for 3.17.

* 'drm-fixes-3.17' of git://people.freedesktop.org/~agd5f/linux:
  radeon: Test for PCI root bus before assuming bus->self
  drm/radeon: handle broken disabled rb mask gracefully (6xx/7xx) (v2)
  drm/radeon: save/restore the PD addr on suspend/resume
parents 52addcf9 0bd252de
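For context on the first fix ("Test for PCI root bus before assuming bus->self"): when the GPU sits directly on a PCI root bus there is no upstream bridge, so bus->self is NULL and dereferencing it crashes. The checks added below in cik.c and si.c simply bail out in that case. The following is only a minimal sketch of that guard pattern, not code from this merge; example_skip_link_tuning() is a hypothetical name, while pci_is_root_bus() is the real helper from <linux/pci.h>.

#include <linux/pci.h>

/* Hypothetical illustration of the guard pattern used by these fixes. */
static bool example_skip_link_tuning(struct pci_dev *pdev)
{
	struct pci_dev *bridge;

	/* A root bus has no parent bridge, so pdev->bus->self would be NULL. */
	if (pci_is_root_bus(pdev->bus))
		return true;

	bridge = pdev->bus->self;	/* safe to dereference past this point */
	return bridge == NULL;		/* defensive; should not happen here */
}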
drivers/gpu/drm/radeon/cik.c

@@ -5749,20 +5749,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(0x15D8, 0);
 	WREG32(0x15DC, 0);
 
-	/* empty context1-15 */
-	/* FIXME start with 4G, once using 2 level pt switch to full
-	 * vm size space
-	 */
+	/* restore context1-15 */
 	/* set vm size, must be a multiple of 4 */
 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-			       rdev->gart.table_addr >> 12);
+			       rdev->vm_manager.saved_table_addr[i]);
 		else
 			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
-			       rdev->gart.table_addr >> 12);
+			       rdev->vm_manager.saved_table_addr[i]);
 	}
 
 	/* enable context1-15 */

@@ -5827,6 +5824,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
  */
 static void cik_pcie_gart_disable(struct radeon_device *rdev)
 {
+	unsigned i;
+
+	for (i = 1; i < 16; ++i) {
+		uint32_t reg;
+		if (i < 8)
+			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+		else
+			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+	}
+
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);

@@ -9555,6 +9563,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
 	int ret, i;
 	u16 tmp16;
 
+	if (pci_is_root_bus(rdev->pdev->bus))
+		return;
+
 	if (radeon_pcie_gen2 == 0)
 		return;

@@ -9781,7 +9792,8 @@ static void cik_program_aspm(struct radeon_device *rdev)
 	if (orig != data)
 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
 
-	if (!disable_clkreq) {
+	if (!disable_clkreq &&
+	    !pci_is_root_bus(rdev->pdev->bus)) {
 		struct pci_dev *root = rdev->pdev->bus->self;
 		u32 lnkcap;
...
drivers/gpu/drm/radeon/ni.c

@@ -1271,7 +1271,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
 		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
 		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-			rdev->gart.table_addr >> 12);
+			rdev->vm_manager.saved_table_addr[i]);
 	}
 
 	/* enable context1-7 */

@@ -1303,6 +1303,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 
 static void cayman_pcie_gart_disable(struct radeon_device *rdev)
 {
+	unsigned i;
+
+	for (i = 1; i < 8; ++i) {
+		rdev->vm_manager.saved_table_addr[i] = RREG32(
+			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
+	}
+
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);
...
drivers/gpu/drm/radeon/r600.c

@@ -1812,7 +1812,6 @@ static void r600_gpu_init(struct radeon_device *rdev)
 {
 	u32 tiling_config;
 	u32 ramcfg;
-	u32 cc_rb_backend_disable;
 	u32 cc_gc_shader_pipe_config;
 	u32 tmp;
 	int i, j;

@@ -1939,29 +1938,20 @@ static void r600_gpu_init(struct radeon_device *rdev)
 	}
 	tiling_config |= BANK_SWAPS(1);
 
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-	tmp = R6XX_MAX_BACKENDS -
-		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
-	if (tmp < rdev->config.r600.max_backends) {
-		rdev->config.r600.max_backends = tmp;
-	}
-
 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
-	tmp = R6XX_MAX_PIPES -
-		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
-	if (tmp < rdev->config.r600.max_pipes) {
-		rdev->config.r600.max_pipes = tmp;
-	}
-	tmp = R6XX_MAX_SIMDS -
-		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
-	if (tmp < rdev->config.r600.max_simds) {
-		rdev->config.r600.max_simds = tmp;
-	}
 	tmp = rdev->config.r600.max_simds -
 		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
 	rdev->config.r600.active_simds = tmp;
 
 	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+	tmp = 0;
+	for (i = 0; i < rdev->config.r600.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.r600.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
 	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
 					R6XX_MAX_BACKENDS, disabled_rb_mask);
...
drivers/gpu/drm/radeon/radeon.h

@@ -915,6 +915,8 @@ struct radeon_vm_manager {
 	u64		vram_base_offset;
 	/* is vm enabled? */
 	bool		enabled;
+	/* for hw to save the PD addr on suspend/resume */
+	uint32_t	saved_table_addr[RADEON_NUM_VM];
 };
 
 /*
...
drivers/gpu/drm/radeon/rv770.c

@@ -1177,7 +1177,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 	u32 hdp_host_path_cntl;
 	u32 sq_dyn_gpr_size_simd_ab_0;
 	u32 gb_tiling_config = 0;
-	u32 cc_rb_backend_disable = 0;
 	u32 cc_gc_shader_pipe_config = 0;
 	u32 mc_arb_ramcfg;
 	u32 db_debug4, tmp;

@@ -1311,21 +1310,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 		WREG32(SPI_CONFIG_CNTL, 0);
 	}
 
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
-	if (tmp < rdev->config.rv770.max_backends) {
-		rdev->config.rv770.max_backends = tmp;
-	}
-
 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
-	if (tmp < rdev->config.rv770.max_pipes) {
-		rdev->config.rv770.max_pipes = tmp;
-	}
-	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
-	if (tmp < rdev->config.rv770.max_simds) {
-		rdev->config.rv770.max_simds = tmp;
-	}
 	tmp = rdev->config.rv770.max_simds -
 		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
 	rdev->config.rv770.active_simds = tmp;

@@ -1348,6 +1333,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 		rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
 	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+	tmp = 0;
+	for (i = 0; i < rdev->config.rv770.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.rv770.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
 	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
 					R7XX_MAX_BACKENDS, disabled_rb_mask);
...
drivers/gpu/drm/radeon/si.c

@@ -4290,10 +4290,10 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-			       rdev->gart.table_addr >> 12);
+			       rdev->vm_manager.saved_table_addr[i]);
 		else
 			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
-			       rdev->gart.table_addr >> 12);
+			       rdev->vm_manager.saved_table_addr[i]);
 	}
 
 	/* enable context1-15 */

@@ -4325,6 +4325,17 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
 
 static void si_pcie_gart_disable(struct radeon_device *rdev)
 {
+	unsigned i;
+
+	for (i = 1; i < 16; ++i) {
+		uint32_t reg;
+		if (i < 8)
+			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+		else
+			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+	}
+
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);

@@ -7177,6 +7188,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
 	int ret, i;
 	u16 tmp16;
 
+	if (pci_is_root_bus(rdev->pdev->bus))
+		return;
+
 	if (radeon_pcie_gen2 == 0)
 		return;

@@ -7454,7 +7468,8 @@ static void si_program_aspm(struct radeon_device *rdev)
 	if (orig != data)
 		WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
 
-	if (!disable_clkreq) {
+	if (!disable_clkreq &&
+	    !pci_is_root_bus(rdev->pdev->bus)) {
 		struct pci_dev *root = rdev->pdev->bus->self;
 		u32 lnkcap;