Commit 6b46362c authored by Linus Torvalds

Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (52 commits)
  drm/kms: Init the CRTC info fields for modes forced from the command line.
  drm/radeon/r600: CS parser updates
  drm/radeon/kms: add debugfs for power management for AtomBIOS devices
  drm/radeon/kms: initial mode validation support
  drm/radeon/kms/atom/dce3: call transmitter init on mode set
  drm/radeon/kms: store detailed connector info
  drm/radeon/kms/atom/dce3: fix up usPixelClock calculation for Transmitter tables
  drm/radeon/kms/r600: fix rs880 support v2
  drm/radeon/kms/r700: fix some typos in chip init
  drm/radeon/kms: remove some misleading debugging output
  drm/radeon/kms: stop putting VRAM at 0 in MC space on r600s.
  drm/radeon/kms: disable D1VGA and D2VGA if enabled
  drm/radeon/kms: Don't RMW CP_RB_CNTL
  drm/radeon/kms: fix coherency issues on AGP cards.
  drm/radeon/kms: fix rc410 suspend/resume.
  drm/radeon/kms: add quirk for hp dc5750
  drm/radeon/kms/atom: fix potential oops in spread spectrum code
  drm/kms: typo fix
  drm/radeon/kms/atom: Make card_info per device
  drm/radeon/kms/atom: Fix DVO support
  ...
parents 961767b7 eeba5751
@@ -331,6 +331,7 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *conn
 		      cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
 		      cmdline_mode->rb, cmdline_mode->interlace,
 		      cmdline_mode->margins);
+	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	list_add(&mode->head, &connector->modes);
 	return mode;
 }
@@ -707,7 +707,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
 		if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
 			mutex_lock(&dev->mode_config.mutex);
-			ret = crtc->funcs->set_config(&fb_helper->crtc_info->mode_set);
+			ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
 			mutex_unlock(&dev->mode_config.mutex);
 			if (ret)
 				return ret;
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
-	r600_blit_kms.o
+	r600_blit_kms.o radeon_pm.o

 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
@@ -2314,7 +2314,7 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
 	UCHAR ucSS_Step;
 	UCHAR ucSS_Delay;
 	UCHAR ucSS_Id;
-	UCHAR ucRecommandedRef_Div;
+	UCHAR ucRecommendedRef_Div;
 	UCHAR ucSS_Range; /* it was reserved for V11 */
 } ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
@@ -186,7 +186,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
 int r100_irq_process(struct radeon_device *rdev)
 {
-	uint32_t status;
+	uint32_t status, msi_rearm;

 	status = r100_irq_ack(rdev);
 	if (!status) {
@@ -209,6 +209,21 @@ int r100_irq_process(struct radeon_device *rdev)
 		}
 		status = r100_irq_ack(rdev);
 	}
+	if (rdev->msi_enabled) {
+		switch (rdev->family) {
+		case CHIP_RS400:
+		case CHIP_RS480:
+			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
+			WREG32(RADEON_AIC_CNTL, msi_rearm);
+			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+			break;
+		default:
+			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
+			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
+			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+			break;
+		}
+	}
 	return IRQ_HANDLED;
 }
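Note: the hunk above re-arms MSIs after servicing the interrupt by pulsing a rearm bit low and then high (RS400_MSI_REARM on RS400/RS480, RV370_MSI_REARM_EN otherwise). A standalone sketch of that clear-then-set pattern, with the MMIO register modeled as a plain variable and the bit position invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define MSI_REARM_BIT (1u << 20)   /* illustrative bit position, not the real layout */

static uint32_t fake_reg;          /* stands in for the MMIO register */

static uint32_t rreg32(void) { return fake_reg; }
static void wreg32(uint32_t v) { fake_reg = v; printf("write 0x%08x\n", (unsigned)v); }

/* Pulse the rearm bit low then high while preserving the other bits,
 * which is the pattern the r100_irq_process() hunk uses. */
static void msi_rearm(void)
{
    uint32_t v = rreg32() & ~MSI_REARM_BIT;  /* clear the rearm bit */
    wreg32(v);                               /* write it back low ... */
    wreg32(v | MSI_REARM_BIT);               /* ... then high to re-arm */
}

int main(void)
{
    fake_reg = 0x00100003;  /* arbitrary starting contents */
    msi_rearm();
    return 0;
}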
@@ -240,7 +255,7 @@ int r100_wb_init(struct radeon_device *rdev)
 	int r;

 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, 4096,
+		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
 					 true,
 					 RADEON_GEM_DOMAIN_GTT,
 					 false, &rdev->wb.wb_obj);
@@ -563,19 +578,19 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	indirect1_start = 16;
 	/* cp setup */
 	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
-	WREG32(RADEON_CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
-	       RADEON_BUF_SWAP_32BIT |
-#endif
-	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
 	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
 	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
 	       RADEON_RB_NO_UPDATE);
+#ifdef __BIG_ENDIAN
+	tmp |= RADEON_BUF_SWAP_32BIT;
+#endif
+	WREG32(RADEON_CP_RB_CNTL, tmp);
 	/* Set ring address */
 	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
 	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
 	/* Force read & write ptr to 0 */
-	tmp = RREG32(RADEON_CP_RB_CNTL);
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
 	WREG32(RADEON_CP_RB_WPTR, 0);
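Note: the CP_RB_CNTL change above composes the whole register value in tmp and writes it once, instead of reading the register back before the RPTR_WR_ENA write ("Don't RMW CP_RB_CNTL" in the shortlog). A minimal user-space sketch of that build-then-write pattern; the field shifts and bit values are stand-ins, not the hardware layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins: the real driver uses REG_SET()/WREG32() and
 * hardware-defined field offsets; these values are made up for the sketch. */
#define RB_BUFSZ_SHIFT   0
#define RB_BLKSZ_SHIFT   8
#define MAX_FETCH_SHIFT  18
#define RB_NO_UPDATE     (1u << 27)
#define BUF_SWAP_32BIT   (2u << 16)

static void wreg32(const char *name, uint32_t v) { printf("%s <- 0x%08x\n", name, (unsigned)v); }

int main(void)
{
    uint32_t rb_bufsz = 12, rb_blksz = 9, max_fetch = 2;

    /* Compose the full register value once instead of reading the
     * register back and OR-ing into whatever was already there. */
    uint32_t tmp = (rb_bufsz << RB_BUFSZ_SHIFT) |
                   (rb_blksz << RB_BLKSZ_SHIFT) |
                   (max_fetch << MAX_FETCH_SHIFT) |
                   RB_NO_UPDATE;
#ifdef __BIG_ENDIAN
    tmp |= BUF_SWAP_32BIT;
#endif
    wreg32("CP_RB_CNTL", tmp);

    /* Later writes reuse the cached value rather than reading it back. */
    wreg32("CP_RB_CNTL", tmp | (1u << 31) /* RPTR_WR_ENA, illustrative */);
    return 0;
}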
@@ -2364,7 +2379,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 	/*
 	  Find the total latency for the display data.
 	*/
-	disp_latency_overhead.full = rfixed_const(80);
+	disp_latency_overhead.full = rfixed_const(8);
 	disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
 	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
 	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@@ -2562,8 +2577,11 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
 {
 	DRM_ERROR("pitch %d\n", t->pitch);
+	DRM_ERROR("use_pitch %d\n", t->use_pitch);
 	DRM_ERROR("width %d\n", t->width);
+	DRM_ERROR("width_11 %d\n", t->width_11);
 	DRM_ERROR("height %d\n", t->height);
+	DRM_ERROR("height_11 %d\n", t->height_11);
 	DRM_ERROR("num levels %d\n", t->num_levels);
 	DRM_ERROR("depth %d\n", t->txdepth);
 	DRM_ERROR("bpp %d\n", t->cpp);
@@ -2623,15 +2641,17 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
 			else
 				w = track->textures[u].pitch / (1 << i);
 		} else {
-			w = track->textures[u].width / (1 << i);
+			w = track->textures[u].width;
 			if (rdev->family >= CHIP_RV515)
 				w |= track->textures[u].width_11;
+			w = w / (1 << i);
 			if (track->textures[u].roundup_w)
 				w = roundup_pow_of_two(w);
 		}
-		h = track->textures[u].height / (1 << i);
+		h = track->textures[u].height;
 		if (rdev->family >= CHIP_RV515)
 			h |= track->textures[u].height_11;
+		h = h / (1 << i);
 		if (track->textures[u].roundup_h)
 			h = roundup_pow_of_two(h);
 		size += w * h;
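Note: the texture-check change folds the width_11/height_11 overflow bit into the base dimension before dividing by the mip level, rather than after. A small standalone sketch of that order of operations (roundup_pow_of_two written out by hand; the _11 value is assumed here to be a 0/1 flag for bit 11):

#include <stdio.h>

static unsigned roundup_pow_of_two(unsigned x)
{
    unsigned p = 1;
    while (p < x)
        p <<= 1;
    return p;
}

/* Mip-level extent: fold the bit-11 overflow into the base size first,
 * then shift for the mip level, as the updated check does. */
static unsigned mip_extent(unsigned base, unsigned bit11, unsigned level, int roundup)
{
    unsigned w = base | (bit11 << 11);  /* width_11/height_11 supplies bit 11 */
    w = w >> level;                     /* same as w / (1 << level) */
    return roundup ? roundup_pow_of_two(w) : w;
}

int main(void)
{
    /* 2048-texel texture: the base field holds 0, the _11 bit supplies bit 11. */
    printf("level 0: %u\n", mip_extent(0, 1, 0, 0));  /* 2048 */
    printf("level 2: %u\n", mip_extent(0, 1, 2, 0));  /* 512  */
    return 0;
}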
@@ -113,7 +113,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
-	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
+	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
@@ -311,6 +311,8 @@ int r420_init(struct radeon_device *rdev)
 	}
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
+	/* Initialize power management */
+	radeon_pm_init(rdev);
 	/* Get vram informations */
 	r300_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
@@ -384,9 +384,16 @@
 #  define AVIVO_D1GRPH_TILED			(1 << 20)
 #  define AVIVO_D1GRPH_MACRO_ADDRESS_MODE	(1 << 21)

+/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
+ * block and vice versa. This applies to GRPH, CUR, etc.
+ */
 #define AVIVO_D1GRPH_LUT_SEL				0x6108
 #define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS		0x6110
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH	0x6914
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH	0x6114
 #define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS		0x6118
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH	0x691c
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH	0x611c
 #define AVIVO_D1GRPH_PITCH				0x6120
 #define AVIVO_D1GRPH_SURFACE_OFFSET_X			0x6124
 #define AVIVO_D1GRPH_SURFACE_OFFSET_Y			0x6128
@@ -404,6 +411,8 @@
 #  define AVIVO_D1CURSOR_MODE_MASK		(3 << 8)
 #  define AVIVO_D1CURSOR_MODE_24BPP		2
 #define AVIVO_D1CUR_SURFACE_ADDRESS		0x6408
+#define R700_D1CUR_SURFACE_ADDRESS_HIGH		0x6c0c
+#define R700_D2CUR_SURFACE_ADDRESS_HIGH		0x640c
 #define AVIVO_D1CUR_SIZE			0x6410
 #define AVIVO_D1CUR_POSITION			0x6414
 #define AVIVO_D1CUR_HOT_SPOT			0x6418
@@ -260,6 +260,8 @@ int r520_init(struct radeon_device *rdev)
 	}
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
+	/* Initialize power management */
+	radeon_pm_init(rdev);
 	/* Get vram informations */
 	r520_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
@@ -339,11 +339,10 @@ int r600_mc_init(struct radeon_device *rdev)
 {
 	fixed20_12 a;
 	u32 tmp;
-	int chansize;
+	int chansize, numchan;
 	int r;

 	/* Get VRAM informations */
-	rdev->mc.vram_width = 128;
 	rdev->mc.vram_is_ddr = true;
 	tmp = RREG32(RAMCFG);
 	if (tmp & CHANSIZE_OVERRIDE) {
@@ -353,17 +352,23 @@ int r600_mc_init(struct radeon_device *rdev)
 	} else {
 		chansize = 32;
 	}
-	if (rdev->family == CHIP_R600) {
-		rdev->mc.vram_width = 8 * chansize;
-	} else if (rdev->family == CHIP_RV670) {
-		rdev->mc.vram_width = 4 * chansize;
-	} else if ((rdev->family == CHIP_RV610) ||
-		   (rdev->family == CHIP_RV620)) {
-		rdev->mc.vram_width = chansize;
-	} else if ((rdev->family == CHIP_RV630) ||
-		   (rdev->family == CHIP_RV635)) {
-		rdev->mc.vram_width = 2 * chansize;
+	tmp = RREG32(CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
 	}
+	rdev->mc.vram_width = numchan * chansize;
 	/* Could aper size report 0 ? */
 	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
 	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
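Note: the new code derives vram_width from the memory controller's channel count instead of a per-family table: decode NOOFCHAN from CHMAP, then multiply by the channel size from RAMCFG. A standalone sketch using the NOOFCHAN_SHIFT/MASK values added in the r600d.h hunk further down:

#include <stdint.h>
#include <stdio.h>

#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK  0x00003000

/* Map the 2-bit NOOFCHAN field to a channel count, as the new
 * r600_mc_init() switch does. */
static int chmap_to_numchan(uint32_t chmap)
{
    switch ((chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
    case 1:  return 2;
    case 2:  return 4;
    case 3:  return 8;
    case 0:
    default: return 1;
    }
}

int main(void)
{
    uint32_t chmap = 0x00001000;   /* NOOFCHAN = 1 -> two channels */
    int chansize = 64;             /* bits per channel, as read from RAMCFG */
    printf("vram_width = %d bits\n", chmap_to_numchan(chmap) * chansize);
    return 0;
}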
@@ -404,35 +409,29 @@ int r600_mc_init(struct radeon_device *rdev)
 			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
 		}
 	} else {
-		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
-			rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
-						  0xFFFF) << 24;
-			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-			tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
-				/* Enough place after vram */
-				rdev->mc.gtt_location = tmp;
-			} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
-				/* Enough place before vram */
-				rdev->mc.gtt_location = 0;
-			} else {
-				/* Not enough place after or before shrink
-				 * gart size
-				 */
-				if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
-					rdev->mc.gtt_location = 0;
-					rdev->mc.gtt_size = rdev->mc.vram_location;
-				} else {
-					rdev->mc.gtt_location = tmp;
-					rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
-				}
-			}
-			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-		} else {
-			rdev->mc.vram_location = 0x00000000UL;
-			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-		}
+		rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+		rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
+					  0xFFFF) << 24;
+		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
+			/* Enough place after vram */
+			rdev->mc.gtt_location = tmp;
+		} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
+			/* Enough place before vram */
+			rdev->mc.gtt_location = 0;
+		} else {
+			/* Not enough place after or before shrink
+			 * gart size
+			 */
+			if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
+				rdev->mc.gtt_location = 0;
+				rdev->mc.gtt_size = rdev->mc.vram_location;
+			} else {
+				rdev->mc.gtt_location = tmp;
+				rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
+			}
+		}
+		rdev->mc.gtt_location = rdev->mc.mc_vram_size;
 	}
 	rdev->mc.vram_start = rdev->mc.vram_location;
 	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
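Note: the non-AGP path now keeps VRAM where MC_VM_FB_LOCATION already points and then looks for room for the GTT in the remaining 32-bit MC space: after VRAM if it fits, before it otherwise, or shrunk into the larger gap. A compact sketch of that placement decision (structure and field names are simplified stand-ins):

#include <stdint.h>
#include <stdio.h>

struct mc { uint64_t vram_loc, vram_size, gtt_loc, gtt_size; };

/* Place (and possibly shrink) the GTT around VRAM inside a 4GB MC space,
 * mirroring the if/else chain in the r600_mc_init() hunk. */
static void place_gtt(struct mc *mc)
{
    uint64_t limit = 0xFFFFFFFFULL;
    uint64_t end = mc->vram_loc + mc->vram_size;

    if (limit - end >= mc->gtt_size) {
        mc->gtt_loc = end;                 /* enough room after VRAM */
    } else if (mc->vram_loc >= mc->gtt_size) {
        mc->gtt_loc = 0;                   /* enough room before VRAM */
    } else if (mc->vram_loc > limit - end) {
        mc->gtt_loc = 0;                   /* shrink: bigger gap is below VRAM */
        mc->gtt_size = mc->vram_loc;
    } else {
        mc->gtt_loc = end;                 /* shrink: bigger gap is above VRAM */
        mc->gtt_size = limit - end;
    }
}

int main(void)
{
    struct mc mc = { .vram_loc = 0xC0000000, .vram_size = 512ULL << 20,
                     .gtt_size = 512ULL << 20 };
    place_gtt(&mc);
    printf("gtt at 0x%llx, size %llu MB\n",
           (unsigned long long)mc.gtt_loc,
           (unsigned long long)(mc.gtt_size >> 20));
    return 0;
}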
@@ -859,7 +858,8 @@ void r600_gpu_init(struct radeon_device *rdev)
 	    ((rdev->family) == CHIP_RV630) ||
 	    ((rdev->family) == CHIP_RV610) ||
 	    ((rdev->family) == CHIP_RV620) ||
-	    ((rdev->family) == CHIP_RS780)) {
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
 		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
 	} else {
 		WREG32(DB_DEBUG, 0);
@@ -876,7 +876,8 @@ void r600_gpu_init(struct radeon_device *rdev)
 	tmp = RREG32(SQ_MS_FIFO_SIZES);
 	if (((rdev->family) == CHIP_RV610) ||
 	    ((rdev->family) == CHIP_RV620) ||
-	    ((rdev->family) == CHIP_RS780)) {
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
 		tmp = (CACHE_FIFO_SIZE(0xa) |
 		       FETCH_FIFO_HIWATER(0xa) |
 		       DONE_FIFO_HIWATER(0xe0) |
@@ -919,7 +920,8 @@ void r600_gpu_init(struct radeon_device *rdev)
 					    NUM_ES_STACK_ENTRIES(0));
 	} else if (((rdev->family) == CHIP_RV610) ||
 		   ((rdev->family) == CHIP_RV620) ||
-		   ((rdev->family) == CHIP_RS780)) {
+		   ((rdev->family) == CHIP_RS780) ||
+		   ((rdev->family) == CHIP_RS880)) {
 		/* no vertex cache */
 		sq_config &= ~VC_ENABLE;
@@ -976,7 +978,8 @@ void r600_gpu_init(struct radeon_device *rdev)
 	if (((rdev->family) == CHIP_RV610) ||
 	    ((rdev->family) == CHIP_RV620) ||
-	    ((rdev->family) == CHIP_RS780)) {
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
 		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
 	} else {
 		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
@@ -1002,8 +1005,9 @@ void r600_gpu_init(struct radeon_device *rdev)
 	tmp = rdev->config.r600.max_pipes * 16;
 	switch (rdev->family) {
 	case CHIP_RV610:
-	case CHIP_RS780:
 	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
 		tmp += 32;
 		break;
 	case CHIP_RV670:
@@ -1044,8 +1048,9 @@ void r600_gpu_init(struct radeon_device *rdev)
 	switch (rdev->family) {
 	case CHIP_RV610:
-	case CHIP_RS780:
 	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
 		tmp = TC_L2_SIZE(8);
 		break;
 	case CHIP_RV630:
@@ -1267,19 +1272,17 @@ int r600_cp_resume(struct radeon_device *rdev)
 	/* Set ring buffer size */
 	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
-	WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
-	       (drm_order(4096/8) << 8) | rb_bufsz);
-#else
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
+	tmp |= BUF_SWAP_32BIT;
 #endif
+	WREG32(CP_RB_CNTL, tmp);
 	WREG32(CP_SEM_WAIT_TIMER, 0x4);

 	/* Set the write pointer delay */
 	WREG32(CP_RB_WPTR_DELAY, 0);

 	/* Initialize the ring buffer's read and write pointers */
-	tmp = RREG32(CP_RB_CNTL);
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
 	WREG32(CP_RB_WPTR, 0);
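Note: CP_RB_CNTL encodes the ring and block sizes as power-of-two orders; drm_order() is the DRM helper that returns the smallest order with (1 << order) >= size. A quick sketch of that computation with the helper written out locally:

#include <stdio.h>

/* Smallest 'order' with (1 << order) >= size, i.e. what drm_order() computes. */
static int order_base_2(unsigned long size)
{
    int order = 0;
    while ((1UL << order) < size)
        order++;
    return order;
}

int main(void)
{
    unsigned long ring_size = 1024 * 1024;      /* 1 MB ring */
    unsigned long gpu_page  = 4096;             /* RADEON_GPU_PAGE_SIZE */

    int rb_bufsz = order_base_2(ring_size / 8); /* ring size field, in 8-byte units */
    int blk      = order_base_2(gpu_page / 8);  /* block size field */

    /* 1 MB / 8 = 131072 = 2^17, 4096 / 8 = 512 = 2^9 */
    printf("rb_bufsz = %d, blk order = %d\n", rb_bufsz, blk);
    return 0;
}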
@@ -1400,7 +1403,7 @@ int r600_wb_enable(struct radeon_device *rdev)
 	int r;

 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, 4096, true,
+		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
 				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
 		if (r) {
 			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
@@ -1450,8 +1453,8 @@ int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_pages, struct radeon_fence *fence)
 {
-	r600_blit_prepare_copy(rdev, num_pages * 4096);
-	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
+	r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
 	r600_blit_done_copy(rdev, fence);
 	return 0;
 }
@@ -1632,10 +1635,13 @@ int r600_init(struct radeon_device *rdev)
 	r600_scratch_init(rdev);
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
+	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	r = radeon_clocks_init(rdev);
 	if (r)
 		return r;
+	/* Initialize power management */
+	radeon_pm_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
@@ -582,6 +582,8 @@ r600_blit_copy(struct drm_device *dev,
 	u64 vb_addr;
 	u32 *vb;

+	vb = r600_nomm_get_vb_ptr(dev);
+
 	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
 		max_bytes = 8192;
@@ -617,8 +619,8 @@ r600_blit_copy(struct drm_device *dev,
 			if (!dev_priv->blit_vb)
 				return;
 			set_shaders(dev);
-			vb = r600_nomm_get_vb_ptr(dev);
 		}
+		vb = r600_nomm_get_vb_ptr(dev);

 		vb[0] = i2f(dst_x);
 		vb[1] = 0;
@@ -706,8 +708,8 @@ r600_blit_copy(struct drm_device *dev,
 				return;
 			set_shaders(dev);
-			vb = r600_nomm_get_vb_ptr(dev);
 		}
+		vb = r600_nomm_get_vb_ptr(dev);

 		vb[0] = i2f(dst_x / 4);
 		vb[1] = 0;
@@ -772,6 +774,7 @@ r600_blit_swap(struct drm_device *dev,
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	int cb_format, tex_format;
+	int sx2, sy2, dx2, dy2;
 	u64 vb_addr;
 	u32 *vb;
@@ -786,16 +789,10 @@ r600_blit_swap(struct drm_device *dev,
 	}
 	vb = r600_nomm_get_vb_ptr(dev);

-	if (cpp == 4) {
-		cb_format = COLOR_8_8_8_8;
-		tex_format = FMT_8_8_8_8;
-	} else if (cpp == 2) {
-		cb_format = COLOR_5_6_5;
-		tex_format = FMT_5_6_5;
-	} else {
-		cb_format = COLOR_8;
-		tex_format = FMT_8;
-	}
+	sx2 = sx + w;
+	sy2 = sy + h;
+	dx2 = dx + w;
+	dy2 = dy + h;

 	vb[0] = i2f(dx);
 	vb[1] = i2f(dy);
@@ -803,31 +800,46 @@ r600_blit_swap(struct drm_device *dev,
 	vb[3] = i2f(sy);

 	vb[4] = i2f(dx);
-	vb[5] = i2f(dy + h);
+	vb[5] = i2f(dy2);
 	vb[6] = i2f(sx);
-	vb[7] = i2f(sy + h);
+	vb[7] = i2f(sy2);

+	vb[8] = i2f(dx2);
+	vb[9] = i2f(dy2);
+	vb[10] = i2f(sx2);
+	vb[11] = i2f(sy2);
-	vb[8] = i2f(dx + w);
-	vb[9] = i2f(dy + h);
-	vb[10] = i2f(sx + w);
-	vb[11] = i2f(sy + h);
+
+	switch(cpp) {
+	case 4:
+		cb_format = COLOR_8_8_8_8;
+		tex_format = FMT_8_8_8_8;
+		break;
+	case 2:
+		cb_format = COLOR_5_6_5;
+		tex_format = FMT_5_6_5;
+		break;
+	default:
+		cb_format = COLOR_8;
+		tex_format = FMT_8;
+		break;
+	}

 	/* src */
 	set_tex_resource(dev_priv, tex_format,
 			 src_pitch / cpp,
-			 sy + h, src_pitch / cpp,
+			 sy2, src_pitch / cpp,
 			 src_gpu_addr);

 	cp_set_surface_sync(dev_priv,
-			    R600_TC_ACTION_ENA, (src_pitch * (sy + h)), src_gpu_addr);
+			    R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);

 	/* dst */
 	set_render_target(dev_priv, cb_format,
-			  dst_pitch / cpp, dy + h,
+			  dst_pitch / cpp, dy2,
 			  dst_gpu_addr);

 	/* scissors */
-	set_scissors(dev_priv, dx, dy, dx + w, dy + h);
+	set_scissors(dev_priv, dx, dy, dx2, dy2);

 	/* Vertex buffer setup */
 	vb_addr = dev_priv->gart_buffers_offset +
@@ -840,7 +852,7 @@ r600_blit_swap(struct drm_device *dev,
 	cp_set_surface_sync(dev_priv,
 			    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
-			    dst_pitch * (dy + h), dst_gpu_addr);
+			    dst_pitch * dy2, dst_gpu_addr);

 	dev_priv->blit_vb->used += 12 * 4;
 }
@@ -368,7 +368,7 @@ set_default_state(struct radeon_device *rdev)
 	if ((rdev->family == CHIP_RV610) ||
 	    (rdev->family == CHIP_RV620) ||
 	    (rdev->family == CHIP_RS780) ||
-	    (rdev->family == CHIP_RS780) ||
+	    (rdev->family == CHIP_RS880) ||
 	    (rdev->family == CHIP_RV710))
 		sq_config = 0;
 	else
@@ -610,6 +610,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 	DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
 		  size_bytes, rdev->r600_blit.vb_used);
+	vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
 	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
 		max_bytes = 8192;
@@ -652,7 +653,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 			vb = r600_nomm_get_vb_ptr(dev);
 #endif
 		}
-		vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);

 		vb[0] = i2f(dst_x);
 		vb[1] = 0;
@@ -747,7 +747,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 			vb = r600_nomm_get_vb_ptr(dev);
 		}
 #endif
-		vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);

 		vb[0] = i2f(dst_x / 4);
 		vb[1] = 0;
@@ -466,6 +466,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		for (i = 0; i < pkt->count; i++) {
 			reg = start_reg + (4 * i);
 			switch (reg) {
+			case SQ_ESGS_RING_BASE:
+			case SQ_GSVS_RING_BASE:
+			case SQ_ESTMP_RING_BASE:
+			case SQ_GSTMP_RING_BASE:
+			case SQ_VSTMP_RING_BASE:
+			case SQ_PSTMP_RING_BASE:
+			case SQ_FBUF_RING_BASE:
+			case SQ_REDUC_RING_BASE:
+			case SX_MEMORY_EXPORT_BASE:
+				r = r600_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad SET_CONFIG_REG "
+							"0x%04X\n", reg);
+					return -EINVAL;
+				}
+				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				break;
 			case CP_COHER_BASE:
 				/* use PACKET3_SURFACE_SYNC */
 				return -EINVAL;
@@ -487,6 +504,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			reg = start_reg + (4 * i);
 			switch (reg) {
 			case DB_DEPTH_BASE:
+			case DB_HTILE_DATA_BASE:
 			case CB_COLOR0_BASE:
 			case CB_COLOR1_BASE:
 			case CB_COLOR2_BASE:
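Note: for the newly whitelisted ring-base registers the checker fetches the relocation and folds the buffer's GPU offset, shifted right by 8, into the command-stream dword; these base registers take 256-byte-aligned addresses. A standalone sketch of that patching arithmetic on a fake indirect buffer:

#include <stdint.h>
#include <stdio.h>

/* Patch one SET_CONFIG_REG payload dword with a relocated GPU address.
 * The *_RING_BASE registers are programmed with the address >> 8,
 * as in the r600_packet3_check() hunk above. */
static void patch_ring_base(uint32_t *ib, int idx, uint64_t gpu_offset)
{
    ib[idx] += (uint32_t)((gpu_offset >> 8) & 0xffffffff);
}

int main(void)
{
    uint32_t ib[4] = { 0 };           /* stand-in for the indirect buffer */
    uint64_t gpu_offset = 0x40000000; /* wherever the kernel placed the BO */

    patch_ring_base(ib, 1, gpu_offset);
    printf("ib[1] = 0x%08x\n", (unsigned)ib[1]); /* 0x00400000 == 0x40000000 >> 8 */
    return 0;
}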
@@ -119,6 +119,7 @@
 #define	DB_DEBUG				0x9830
 #define	PREZ_MUST_WAIT_FOR_POSTZ_DONE		(1 << 31)
 #define	DB_DEPTH_BASE				0x2800C
+#define	DB_HTILE_DATA_BASE			0x28014
 #define	DB_WATERMARKS				0x9838
 #define	DEPTH_FREE(x)				((x) << 0)
 #define	DEPTH_FLUSH(x)				((x) << 5)
@@ -171,6 +172,14 @@
 #define	SQ_STACK_RESOURCE_MGMT_2		0x8c14
 #define		NUM_GS_STACK_ENTRIES(x)		((x) << 0)
 #define		NUM_ES_STACK_ENTRIES(x)		((x) << 16)
+#define	SQ_ESGS_RING_BASE			0x8c40
+#define	SQ_GSVS_RING_BASE			0x8c48
+#define	SQ_ESTMP_RING_BASE			0x8c50
+#define	SQ_GSTMP_RING_BASE			0x8c58
+#define	SQ_VSTMP_RING_BASE			0x8c60
+#define	SQ_PSTMP_RING_BASE			0x8c68
+#define	SQ_FBUF_RING_BASE			0x8c70
+#define	SQ_REDUC_RING_BASE			0x8c78

 #define	GRBM_CNTL				0x8000
 #define		GRBM_READ_TIMEOUT(x)		((x) << 0)
@@ -271,6 +280,10 @@
 #define	PCIE_PORT_INDEX				0x0038
 #define	PCIE_PORT_DATA				0x003C

+#define	CHMAP					0x2004
+#define		NOOFCHAN_SHIFT			12
+#define		NOOFCHAN_MASK			0x00003000
+
 #define	RAMCFG					0x2408
 #define		NOOFBANK_SHIFT			0
 #define		NOOFBANK_MASK			0x00000001
@@ -352,6 +365,7 @@
 #define	SX_MISC					0x28350
+#define	SX_MEMORY_EXPORT_BASE			0x9010
 #define	SX_DEBUG_1				0x9054
 #define		SMX_EVENT_RELEASE		(1 << 0)
 #define		ENABLE_NEW_SMX_ADDRESS		(1 << 16)
@@ -139,6 +139,10 @@ struct radeon_clock {
 	uint32_t default_sclk;
 };

+/*
+ * Power management
+ */
+int radeon_pm_init(struct radeon_device *rdev);

 /*
  * Fences.
@@ -276,6 +280,8 @@ union radeon_gart_table {
 	struct radeon_gart_table_vram vram;
 };

+#define RADEON_GPU_PAGE_SIZE 4096
+
 struct radeon_gart {
 	dma_addr_t			table_addr;
 	unsigned			num_gpu_pages;
@@ -621,7 +627,9 @@ struct radeon_asic {
 			 uint64_t dst_offset,
 			 unsigned num_pages,
 			 struct radeon_fence *fence);
+	uint32_t (*get_engine_clock)(struct radeon_device *rdev);
 	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+	uint32_t (*get_memory_clock)(struct radeon_device *rdev);
 	void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
 	void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
 	void (*set_clock_gating)(struct radeon_device *rdev, int enable);
@@ -783,6 +791,7 @@ struct radeon_device {
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
 	struct r600_blit r600_blit;
+	int msi_enabled; /* msi enabled */
 };

 int radeon_device_init(struct radeon_device *rdev,
@@ -952,7 +961,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
 #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
 #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
+#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
 #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
 #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
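Note: radeon dispatches per-family operations through a table of function pointers plus thin wrapper macros; the hunks above add get_engine_clock/get_memory_clock slots to that table. A reduced sketch of the pattern, trimmed to the two new hooks and using a made-up implementation that returns a clock in the driver's 10 kHz units:

#include <stdint.h>
#include <stdio.h>

struct radeon_device;                      /* forward declaration */

struct radeon_asic {
    uint32_t (*get_engine_clock)(struct radeon_device *rdev);
    uint32_t (*get_memory_clock)(struct radeon_device *rdev);  /* may be NULL on legacy chips */
};

struct radeon_device {
    const struct radeon_asic *asic;
};

/* Wrapper in the style of the radeon.h macros. */
#define radeon_get_engine_clock(rdev) ((rdev)->asic->get_engine_clock((rdev)))

static uint32_t fake_get_engine_clock(struct radeon_device *rdev)
{
    (void)rdev;
    return 68000;   /* 680 MHz expressed in 10 kHz units */
}

static const struct radeon_asic fake_asic = {
    .get_engine_clock = fake_get_engine_clock,
    .get_memory_clock = NULL,
};

int main(void)
{
    struct radeon_device rdev = { .asic = &fake_asic };
    printf("sclk = %u (10 kHz)\n", (unsigned)radeon_get_engine_clock(&rdev));
    return 0;
}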
@@ -31,10 +31,13 @@
 /*
  * common functions
  */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
 void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
 void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);

+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
 void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
 void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
@@ -95,7 +98,9 @@ static struct radeon_asic r100_asic = {
 	.copy_blit = &r100_copy_blit,
 	.copy_dma = NULL,
 	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_legacy_get_engine_clock,
 	.set_engine_clock = &radeon_legacy_set_engine_clock,
+	.get_memory_clock = NULL,
 	.set_memory_clock = NULL,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -148,7 +153,9 @@ static struct radeon_asic r300_asic = {
 	.copy_blit = &r100_copy_blit,
 	.copy_dma = &r300_copy_dma,
 	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_legacy_get_engine_clock,
 	.set_engine_clock = &radeon_legacy_set_engine_clock,
+	.get_memory_clock = NULL,
 	.set_memory_clock = NULL,
 	.set_pcie_lanes = &rv370_set_pcie_lanes,
 	.set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -185,7 +192,9 @@ static struct radeon_asic r420_asic = {
 	.copy_blit = &r100_copy_blit,
 	.copy_dma = &r300_copy_dma,
 	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.set_pcie_lanes = &rv370_set_pcie_lanes,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -227,7 +236,9 @@ static struct radeon_asic rs400_asic = {
 	.copy_blit = &r100_copy_blit,
 	.copy_dma = &r300_copy_dma,
 	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_legacy_get_engine_clock,
 	.set_engine_clock = &radeon_legacy_set_engine_clock,
+	.get_memory_clock = NULL,
 	.set_memory_clock = NULL,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -273,7 +284,9 @@ static struct radeon_asic rs600_asic = {
 	.copy_blit = &r100_copy_blit,
 	.copy_dma = &r300_copy_dma,
 	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -312,7 +325,9 @@ static struct radeon_asic rs690_asic = {
 	.copy_blit = &r100_copy_blit,
 	.copy_dma = &r300_copy_dma,
 	.copy = &r300_copy_dma,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -357,7 +372,9 @@ static struct radeon_asic rv515_asic = {
 	.copy_blit = &r100_copy_blit,
 	.copy_dma = &r300_copy_dma,
 	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.set_pcie_lanes = &rv370_set_pcie_lanes,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -393,7 +410,9 @@ static struct radeon_asic r520_asic = {
 	.copy_blit = &r100_copy_blit,
 	.copy_dma = &r300_copy_dma,
 	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.set_pcie_lanes = &rv370_set_pcie_lanes,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -456,7 +475,9 @@ static struct radeon_asic r600_asic = {
 	.copy_blit = &r600_copy_blit,
 	.copy_dma = &r600_copy_blit,
 	.copy = &r600_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -493,7 +514,9 @@ static struct radeon_asic rv770_asic = {
 	.copy_blit = &r600_copy_blit,
 	.copy_dma = &r600_copy_blit,
 	.copy = &r600_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 		if (r) {
 			goto out_cleanup;
 		}
-		r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence);
+		r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
 		if (r) {
 			goto out_cleanup;
 		}
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 		if (r) {
 			goto out_cleanup;
 		}
-		r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence);
+		r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
 		if (r) {
 			goto out_cleanup;
 		}
@@ -50,19 +50,16 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
 	vram_base = drm_get_resource_start(rdev->ddev, 0);
 	bios = ioremap(vram_base, size);
 	if (!bios) {
-		DRM_ERROR("Unable to mmap vram\n");
 		return false;
 	}

 	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
 		iounmap(bios);
-		DRM_ERROR("bad rom signature\n");
 		return false;
 	}
 	rdev->bios = kmalloc(size, GFP_KERNEL);
 	if (rdev->bios == NULL) {
 		iounmap(bios);
-		DRM_ERROR("kmalloc failed\n");
 		return false;
 	}
 	memcpy(rdev->bios, bios, size);
@@ -32,7 +32,7 @@
 #include "atom.h"

 /* 10 khz */
-static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
 {
 	struct radeon_pll *spll = &rdev->clock.spll;
 	uint32_t fb_div, ref_div, post_div, sclk;
@@ -109,9 +109,15 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct radeon_device *rdev = crtc->dev->dev_private;

-	if (ASIC_IS_AVIVO(rdev))
+	if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->family >= CHIP_RV770) {
+			if (radeon_crtc->crtc_id)
+				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
+			else
+				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
+		}
 		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
-	else {
+	} else {
 		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
 		/* offset is from DISP(2)_BASE_ADDRESS */
 		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
@@ -444,20 +444,24 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
 	return r;
 }

-static struct card_info atom_card_info = {
-	.dev = NULL,
-	.reg_read = cail_reg_read,
-	.reg_write = cail_reg_write,
-	.mc_read = cail_mc_read,
-	.mc_write = cail_mc_write,
-	.pll_read = cail_pll_read,
-	.pll_write = cail_pll_write,
-};
-
 int radeon_atombios_init(struct radeon_device *rdev)
 {
-	atom_card_info.dev = rdev->ddev;
-	rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
+	struct card_info *atom_card_info =
+	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
+
+	if (!atom_card_info)
+		return -ENOMEM;
+
+	rdev->mode_info.atom_card_info = atom_card_info;
+	atom_card_info->dev = rdev->ddev;
+	atom_card_info->reg_read = cail_reg_read;
+	atom_card_info->reg_write = cail_reg_write;
+	atom_card_info->mc_read = cail_mc_read;
+	atom_card_info->mc_write = cail_mc_write;
+	atom_card_info->pll_read = cail_pll_read;
+	atom_card_info->pll_write = cail_pll_write;
+	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
 	return 0;
 }
@@ -465,6 +469,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
 void radeon_atombios_fini(struct radeon_device *rdev)
 {
 	kfree(rdev->mode_info.atom_context);
+	kfree(rdev->mode_info.atom_card_info);
 }

 int radeon_combios_init(struct radeon_device *rdev)
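Note: the atombios change replaces a single file-scope card_info, which every card in the system would have shared, with one allocated per device and freed in the fini path. A user-space sketch of that pattern with calloc/free standing in for kzalloc/kfree and only the dev field kept:

#include <stdlib.h>
#include <stdio.h>

struct card_info {
    void *dev;
    /* register/MC/PLL accessor function pointers live here in the driver */
};

struct mode_info { struct card_info *atom_card_info; };
struct device    { struct mode_info mode_info; void *ddev; };

/* One card_info per device instead of a single static instance. */
static int atombios_init(struct device *d)
{
    struct card_info *info = calloc(1, sizeof(*info));  /* kzalloc() in the kernel */
    if (!info)
        return -1;
    info->dev = d->ddev;
    d->mode_info.atom_card_info = info;
    return 0;
}

static void atombios_fini(struct device *d)
{
    free(d->mode_info.atom_card_info);                  /* kfree() in the kernel */
}

int main(void)
{
    struct device a = { { 0 }, 0 }, b = { { 0 }, 0 };
    if (atombios_init(&a) == 0 && atombios_init(&b) == 0)
        printf("two devices, two card_info structs: %p %p\n",
               (void *)a.mode_info.atom_card_info,
               (void *)b.mode_info.atom_card_info);
    atombios_fini(&a);
    atombios_fini(&b);
    return 0;
}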
@@ -137,9 +137,6 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 	if (size != 256) {
 		return;
 	}
-	if (crtc->fb == NULL) {
-		return;
-	}

 	/* userspace palettes are always correct as is */
 	for (i = 0; i < 256; i++) {
@@ -147,7 +144,6 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 		radeon_crtc->lut_g[i] = green[i] >> 6;
 		radeon_crtc->lut_b[i] = blue[i] >> 6;
 	}
-
 	radeon_crtc_load_lut(crtc);
 }
@@ -338,27 +334,19 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
 int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 {
-	struct edid *edid;
 	int ret = 0;

 	if (!radeon_connector->ddc_bus)
 		return -1;
 	if (!radeon_connector->edid) {
 		radeon_i2c_do_lock(radeon_connector, 1);
-		edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
 		radeon_i2c_do_lock(radeon_connector, 0);
-	} else
-		edid = radeon_connector->edid;
+	}

-	if (edid) {
-		/* update digital bits here */
-		if (edid->input & DRM_EDID_INPUT_DIGITAL)
-			radeon_connector->use_digital = 1;
-		else
-			radeon_connector->use_digital = 0;
-		drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
-		ret = drm_add_edid_modes(&radeon_connector->base, edid);
-		kfree(edid);
+	if (radeon_connector->edid) {
+		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
 		return ret;
 	}
 	drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
@@ -765,7 +753,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 			radeon_crtc->rmx_type = radeon_encoder->rmx_type;
 			memcpy(&radeon_crtc->native_mode,
 				&radeon_encoder->native_mode,
-				sizeof(struct radeon_native_mode));
+				sizeof(struct drm_display_mode));
 			first = false;
 		} else {
 			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
@@ -783,10 +771,10 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 	if (radeon_crtc->rmx_type != RMX_OFF) {
 		fixed20_12 a, b;
 		a.full = rfixed_const(crtc->mode.vdisplay);
-		b.full = rfixed_const(radeon_crtc->native_mode.panel_xres);
+		b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
 		radeon_crtc->vsc.full = rfixed_div(a, b);
 		a.full = rfixed_const(crtc->mode.hdisplay);
-		b.full = rfixed_const(radeon_crtc->native_mode.panel_yres);
+		b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
 		radeon_crtc->hsc.full = rfixed_div(a, b);
 	} else {
 		radeon_crtc->vsc.full = rfixed_const(1);
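Note: the scaling fix-up stores vsc/hsc as fixed-point ratios of the requested mode to the native panel mode via rfixed_const()/rfixed_div(). A sketch of such helpers, assuming the 20.12 layout the fixed20_12 type name suggests:

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point: 20 integer bits, 12 fractional bits (assumed layout). */
typedef uint32_t fixed20_12;

static fixed20_12 fx_const(uint32_t v) { return v << 12; }

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
    return (fixed20_12)(((uint64_t)a << 12) / b);  /* keep 12 fractional bits */
}

int main(void)
{
    /* Scale factor = requested size / native size, as in the scaling fixup. */
    fixed20_12 a = fx_const(768);    /* mode being set */
    fixed20_12 b = fx_const(1080);   /* panel native size */
    fixed20_12 vsc = fx_div(a, b);

    printf("vsc = %u/4096 (~%.3f)\n", (unsigned)vsc, vsc / 4096.0);
    return 0;
}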
...@@ -140,15 +140,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, ...@@ -140,15 +140,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
WARN(1, "trying to unbind memory to unitialized GART !\n"); WARN(1, "trying to unbind memory to unitialized GART !\n");
return; return;
} }
t = offset / 4096; t = offset / RADEON_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / 4096); p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) { for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) { if (rdev->gart.pages[p]) {
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL; rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = 0; rdev->gart.pages_addr[p] = 0;
for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, 0); radeon_gart_set_page(rdev, t, 0);
} }
} }
...@@ -169,8 +169,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, ...@@ -169,8 +169,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
DRM_ERROR("trying to bind memory to unitialized GART !\n"); DRM_ERROR("trying to bind memory to unitialized GART !\n");
return -EINVAL; return -EINVAL;
} }
t = offset / 4096; t = offset / RADEON_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / 4096); p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) { for (i = 0; i < pages; i++, p++) {
/* we need to support large memory configurations */ /* we need to support large memory configurations */
...@@ -185,9 +185,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, ...@@ -185,9 +185,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
} }
rdev->gart.pages[p] = pagelist[i]; rdev->gart.pages[p] = pagelist[i];
page_base = rdev->gart.pages_addr[p]; page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base); radeon_gart_set_page(rdev, t, page_base);
page_base += 4096; page_base += RADEON_GPU_PAGE_SIZE;
} }
} }
mb(); mb();
...@@ -200,14 +200,14 @@ int radeon_gart_init(struct radeon_device *rdev) ...@@ -200,14 +200,14 @@ int radeon_gart_init(struct radeon_device *rdev)
if (rdev->gart.pages) { if (rdev->gart.pages) {
return 0; return 0;
} }
/* We need PAGE_SIZE >= 4096 */ /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
if (PAGE_SIZE < 4096) { if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
DRM_ERROR("Page size is smaller than GPU page size!\n"); DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL; return -EINVAL;
} }
/* Compute table size */ /* Compute table size */
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096; rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */ /* Allocate pages table */
......
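The GART hunks above replace the hard-coded 4096 with RADEON_GPU_PAGE_SIZE, but the index math stays the same: a byte offset is first converted to a GPU page-table entry, then to the backing CPU page, and each bound CPU page fills PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive entries. The snippet below is an illustrative sketch only (not part of the patch); the 8 KiB PAGE_SIZE is an assumed value chosen to make the two granularities differ.

/* Illustrative only: mirrors the index math in radeon_gart_bind()/unbind().
 * RADEON_GPU_PAGE_SIZE and PAGE_SIZE are assumed constants for the demo. */
#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096u   /* GART entry granularity */
#define PAGE_SIZE            8192u   /* hypothetical CPU page size */

int main(void)
{
	unsigned offset = 3 * PAGE_SIZE;                      /* byte offset into the GART aperture */
	unsigned t = offset / RADEON_GPU_PAGE_SIZE;           /* first GPU page-table entry */
	unsigned p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);  /* index of the backing CPU page */
	unsigned j;

	/* one CPU page covers PAGE_SIZE / RADEON_GPU_PAGE_SIZE GART entries */
	for (j = 0; j < PAGE_SIZE / RADEON_GPU_PAGE_SIZE; j++, t++)
		printf("cpu page %u -> gart entry %u\n", p, t);
	return 0;
}

With the assumed sizes, CPU page 3 maps to GART entries 6 and 7, which is exactly the loop structure the driver uses when programming page_base into consecutive entries.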
...@@ -92,6 +92,13 @@ int radeon_irq_kms_init(struct radeon_device *rdev) ...@@ -92,6 +92,13 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (r) { if (r) {
return r; return r;
} }
/* enable msi */
rdev->msi_enabled = 0;
if (rdev->family >= CHIP_RV380) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret)
rdev->msi_enabled = 1;
}
drm_irq_install(rdev->ddev); drm_irq_install(rdev->ddev);
rdev->irq.installed = true; rdev->irq.installed = true;
DRM_INFO("radeon: irq initialized.\n"); DRM_INFO("radeon: irq initialized.\n");
...@@ -103,5 +110,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) ...@@ -103,5 +110,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
if (rdev->irq.installed) { if (rdev->irq.installed) {
rdev->irq.installed = false; rdev->irq.installed = false;
drm_irq_uninstall(rdev->ddev); drm_irq_uninstall(rdev->ddev);
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
} }
} }
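The radeon_irq_kms hunks above opt into MSI on RV380 and newer parts and tear it down symmetrically in the fini path. The sketch below shows only that enable/disable pattern under a hypothetical device structure; pci_enable_msi() and pci_disable_msi() are the real PCI core calls, everything else is a stand-in, and the code is meant as a module-context illustration rather than a drop-in.

/* Minimal sketch of the MSI bring-up/tear-down pattern; struct demo_dev
 * and the helpers are hypothetical stand-ins for the radeon equivalents. */
#include <linux/pci.h>

struct demo_dev {
	struct pci_dev *pdev;
	bool msi_enabled;
};

static void demo_irq_init(struct demo_dev *d)
{
	d->msi_enabled = false;
	/* opt into MSI; fall back silently to legacy interrupts on failure */
	if (!pci_enable_msi(d->pdev))
		d->msi_enabled = true;
}

static void demo_irq_fini(struct demo_dev *d)
{
	if (d->msi_enabled)
		pci_disable_msi(d->pdev);  /* undo MSI before the IRQ handler goes away */
}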
...@@ -48,7 +48,7 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, ...@@ -48,7 +48,7 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
struct radeon_native_mode *native_mode = &radeon_crtc->native_mode; struct drm_display_mode *native_mode = &radeon_crtc->native_mode;
fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
(RADEON_VERT_STRETCH_RESERVED | (RADEON_VERT_STRETCH_RESERVED |
...@@ -95,19 +95,19 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, ...@@ -95,19 +95,19 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
fp_horz_vert_active = 0; fp_horz_vert_active = 0;
if (native_mode->panel_xres == 0 || if (native_mode->hdisplay == 0 ||
native_mode->panel_yres == 0) { native_mode->vdisplay == 0) {
hscale = false; hscale = false;
vscale = false; vscale = false;
} else { } else {
if (xres > native_mode->panel_xres) if (xres > native_mode->hdisplay)
xres = native_mode->panel_xres; xres = native_mode->hdisplay;
if (yres > native_mode->panel_yres) if (yres > native_mode->vdisplay)
yres = native_mode->panel_yres; yres = native_mode->vdisplay;
if (xres == native_mode->panel_xres) if (xres == native_mode->hdisplay)
hscale = false; hscale = false;
if (yres == native_mode->panel_yres) if (yres == native_mode->vdisplay)
vscale = false; vscale = false;
} }
...@@ -119,11 +119,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, ...@@ -119,11 +119,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
else { else {
inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
/ native_mode->panel_xres + 1; / native_mode->hdisplay + 1;
fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
RADEON_HORZ_STRETCH_BLEND | RADEON_HORZ_STRETCH_BLEND |
RADEON_HORZ_STRETCH_ENABLE | RADEON_HORZ_STRETCH_ENABLE |
((native_mode->panel_xres/8-1) << 16)); ((native_mode->hdisplay/8-1) << 16));
} }
if (!vscale) if (!vscale)
...@@ -131,11 +131,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, ...@@ -131,11 +131,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
else { else {
inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
/ native_mode->panel_yres + 1; / native_mode->vdisplay + 1;
fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
RADEON_VERT_STRETCH_ENABLE | RADEON_VERT_STRETCH_ENABLE |
RADEON_VERT_STRETCH_BLEND | RADEON_VERT_STRETCH_BLEND |
((native_mode->panel_yres-1) << 12)); ((native_mode->vdisplay-1) << 12));
} }
break; break;
case RMX_CENTER: case RMX_CENTER:
...@@ -175,8 +175,8 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, ...@@ -175,8 +175,8 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
? RADEON_CRTC_V_SYNC_POL ? RADEON_CRTC_V_SYNC_POL
: 0))); : 0)));
fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
(((native_mode->panel_xres / 8) & 0x1ff) << 16)); (((native_mode->hdisplay / 8) & 0x1ff) << 16));
break; break;
case RMX_OFF: case RMX_OFF:
default: default:
...@@ -532,6 +532,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, ...@@ -532,6 +532,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
radeon_fb = to_radeon_framebuffer(old_fb); radeon_fb = to_radeon_framebuffer(old_fb);
radeon_gem_object_unpin(radeon_fb->obj); radeon_gem_object_unpin(radeon_fb->obj);
} }
/* Bytes per pixel may have changed */
radeon_bandwidth_update(rdev);
return 0; return 0;
} }
...@@ -664,6 +668,9 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod ...@@ -664,6 +668,9 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl); WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
} else { } else {
uint32_t crtc_gen_cntl; uint32_t crtc_gen_cntl;
uint32_t crtc_ext_cntl; uint32_t crtc_ext_cntl;
...@@ -1015,14 +1022,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, ...@@ -1015,14 +1022,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb) int x, int y, struct drm_framebuffer *old_fb)
{ {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
/* TODO TV */ /* TODO TV */
radeon_crtc_set_base(crtc, x, y, old_fb); radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_set_crtc_timing(crtc, adjusted_mode); radeon_set_crtc_timing(crtc, adjusted_mode);
radeon_set_pll(crtc, adjusted_mode); radeon_set_pll(crtc, adjusted_mode);
radeon_bandwidth_update(rdev);
if (radeon_crtc->crtc_id == 0) { if (radeon_crtc->crtc_id == 0) {
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
} else { } else {
......
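The RMX hunks above compute the horizontal and vertical stretch as scale = ((res + inc) * RATIO_MAX) / native + 1, i.e. the displayed resolution expressed as a fixed-point fraction of the panel's native size. The quick numeric check below is not from the patch; it assumes a ratio unit of 4096 (a 12-bit fraction) purely for illustration, where the driver uses RADEON_HORZ_STRETCH_RATIO_MAX.

/* Worked example of the stretch-ratio computation, with an assumed
 * RATIO_MAX of 4096 standing in for RADEON_HORZ_STRETCH_RATIO_MAX. */
#include <stdio.h>

#define RATIO_MAX 4096u

int main(void)
{
	unsigned xres = 1024, hdisplay = 1400, inc = 0;
	unsigned scale = (xres + inc) * RATIO_MAX / hdisplay + 1;

	/* 1024 * 4096 / 1400 = 2995 (integer), +1 = 2996, i.e. ~0.731 of native width */
	printf("scale = %u (%.3f of native width)\n", scale, (double)scale / RATIO_MAX);
	return 0;
}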
...@@ -107,8 +107,6 @@ static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) ...@@ -107,8 +107,6 @@ static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
else else
radeon_combios_output_lock(encoder, true); radeon_combios_output_lock(encoder, true);
radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
radeon_encoder_set_active_device(encoder);
} }
static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
...@@ -192,6 +190,8 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, ...@@ -192,6 +190,8 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
{ {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0); drm_mode_set_crtcinfo(adjusted_mode, 0);
if (radeon_encoder->rmx_type != RMX_OFF) if (radeon_encoder->rmx_type != RMX_OFF)
...@@ -218,7 +218,8 @@ static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder, ...@@ -218,7 +218,8 @@ static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
{ {
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0); drm_mode_set_crtcinfo(adjusted_mode, 0);
return true; return true;
...@@ -272,7 +273,6 @@ static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) ...@@ -272,7 +273,6 @@ static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
else else
radeon_combios_output_lock(encoder, true); radeon_combios_output_lock(encoder, true);
radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
radeon_encoder_set_active_device(encoder);
} }
static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
...@@ -468,7 +468,6 @@ static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) ...@@ -468,7 +468,6 @@ static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
else else
radeon_combios_output_lock(encoder, true); radeon_combios_output_lock(encoder, true);
radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
radeon_encoder_set_active_device(encoder);
} }
static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
...@@ -543,6 +542,14 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, ...@@ -543,6 +542,14 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
RADEON_FP_DFP_SYNC_SEL |
RADEON_FP_CRT_SYNC_SEL |
RADEON_FP_CRTC_LOCK_8DOT |
RADEON_FP_USE_SHADOW_EN |
RADEON_FP_CRTC_USE_SHADOW_VEND |
RADEON_FP_CRT_SYNC_ALT);
if (1) /* FIXME rgbBits == 8 */ if (1) /* FIXME rgbBits == 8 */
fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
else else
...@@ -556,7 +563,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, ...@@ -556,7 +563,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
else else
fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
} else } else
fp_gen_cntl |= RADEON_FP_SEL_CRTC1; fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
} else { } else {
if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
...@@ -593,7 +600,8 @@ static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder, ...@@ -593,7 +600,8 @@ static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
{ {
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0); drm_mode_set_crtcinfo(adjusted_mode, 0);
return true; return true;
...@@ -636,7 +644,6 @@ static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) ...@@ -636,7 +644,6 @@ static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
else else
radeon_combios_output_lock(encoder, true); radeon_combios_output_lock(encoder, true);
radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
radeon_encoder_set_active_device(encoder);
} }
static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
...@@ -735,7 +742,8 @@ static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder, ...@@ -735,7 +742,8 @@ static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
{ {
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0); drm_mode_set_crtcinfo(adjusted_mode, 0);
return true; return true;
...@@ -839,7 +847,6 @@ static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) ...@@ -839,7 +847,6 @@ static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
else else
radeon_combios_output_lock(encoder, true); radeon_combios_output_lock(encoder, true);
radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
radeon_encoder_set_active_device(encoder);
} }
static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
......
...@@ -172,6 +172,7 @@ enum radeon_connector_table { ...@@ -172,6 +172,7 @@ enum radeon_connector_table {
struct radeon_mode_info { struct radeon_mode_info {
struct atom_context *atom_context; struct atom_context *atom_context;
struct card_info *atom_card_info;
enum radeon_connector_table connector_table; enum radeon_connector_table connector_table;
bool mode_config_initialized; bool mode_config_initialized;
struct radeon_crtc *crtcs[2]; struct radeon_crtc *crtcs[2];
...@@ -186,17 +187,6 @@ struct radeon_mode_info { ...@@ -186,17 +187,6 @@ struct radeon_mode_info {
}; };
struct radeon_native_mode {
/* preferred mode */
uint32_t panel_xres, panel_yres;
uint32_t hoverplus, hsync_width;
uint32_t hblank;
uint32_t voverplus, vsync_width;
uint32_t vblank;
uint32_t dotclock;
uint32_t flags;
};
#define MAX_H_CODE_TIMING_LEN 32 #define MAX_H_CODE_TIMING_LEN 32
#define MAX_V_CODE_TIMING_LEN 32 #define MAX_V_CODE_TIMING_LEN 32
...@@ -228,7 +218,7 @@ struct radeon_crtc { ...@@ -228,7 +218,7 @@ struct radeon_crtc {
enum radeon_rmx_type rmx_type; enum radeon_rmx_type rmx_type;
fixed20_12 vsc; fixed20_12 vsc;
fixed20_12 hsc; fixed20_12 hsc;
struct radeon_native_mode native_mode; struct drm_display_mode native_mode;
}; };
struct radeon_encoder_primary_dac { struct radeon_encoder_primary_dac {
...@@ -248,7 +238,7 @@ struct radeon_encoder_lvds { ...@@ -248,7 +238,7 @@ struct radeon_encoder_lvds {
bool use_bios_dividers; bool use_bios_dividers;
uint32_t lvds_gen_cntl; uint32_t lvds_gen_cntl;
/* panel mode */ /* panel mode */
struct radeon_native_mode native_mode; struct drm_display_mode native_mode;
}; };
struct radeon_encoder_tv_dac { struct radeon_encoder_tv_dac {
...@@ -271,6 +261,16 @@ struct radeon_encoder_int_tmds { ...@@ -271,6 +261,16 @@ struct radeon_encoder_int_tmds {
struct radeon_tmds_pll tmds_pll[4]; struct radeon_tmds_pll tmds_pll[4];
}; };
/* spread spectrum */
struct radeon_atom_ss {
uint16_t percentage;
uint8_t type;
uint8_t step;
uint8_t delay;
uint8_t range;
uint8_t refdiv;
};
struct radeon_encoder_atom_dig { struct radeon_encoder_atom_dig {
/* atom dig */ /* atom dig */
bool coherent_mode; bool coherent_mode;
...@@ -278,8 +278,9 @@ struct radeon_encoder_atom_dig { ...@@ -278,8 +278,9 @@ struct radeon_encoder_atom_dig {
/* atom lvds */ /* atom lvds */
uint32_t lvds_misc; uint32_t lvds_misc;
uint16_t panel_pwr_delay; uint16_t panel_pwr_delay;
struct radeon_atom_ss *ss;
/* panel mode */ /* panel mode */
struct radeon_native_mode native_mode; struct drm_display_mode native_mode;
}; };
struct radeon_encoder_atom_dac { struct radeon_encoder_atom_dac {
...@@ -294,7 +295,7 @@ struct radeon_encoder { ...@@ -294,7 +295,7 @@ struct radeon_encoder {
uint32_t flags; uint32_t flags;
uint32_t pixel_clock; uint32_t pixel_clock;
enum radeon_rmx_type rmx_type; enum radeon_rmx_type rmx_type;
struct radeon_native_mode native_mode; struct drm_display_mode native_mode;
void *enc_priv; void *enc_priv;
}; };
...@@ -308,12 +309,15 @@ struct radeon_connector { ...@@ -308,12 +309,15 @@ struct radeon_connector {
uint32_t connector_id; uint32_t connector_id;
uint32_t devices; uint32_t devices;
struct radeon_i2c_chan *ddc_bus; struct radeon_i2c_chan *ddc_bus;
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
bool use_digital; bool use_digital;
/* we need to mind the EDID between detect /* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */ and get modes due to analog/digital/tvencoder */
struct edid *edid; struct edid *edid;
void *con_priv; void *con_priv;
bool dac_load_detect; bool dac_load_detect;
uint16_t connector_object_id;
}; };
struct radeon_framebuffer { struct radeon_framebuffer {
......
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rafał Miłecki <zajec5@gmail.com>
*/
#include "drmP.h"
#include "radeon.h"
int radeon_debugfs_pm_init(struct radeon_device *rdev);
int radeon_pm_init(struct radeon_device *rdev)
{
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM !\n");
}
return 0;
}
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
return 0;
}
static struct drm_info_list radeon_pm_info_list[] = {
{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
return 0;
#endif
}
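As a usage note (not stated in the patch itself): once debugfs is mounted, typically at /sys/kernel/debug, the new entry should appear under the DRM minor's directory, e.g. /sys/kernel/debug/dri/0/radeon_pm_info, and reading it prints the current engine and memory clocks queried through the ASIC callbacks.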
...@@ -290,6 +290,8 @@ ...@@ -290,6 +290,8 @@
#define RADEON_BUS_CNTL 0x0030 #define RADEON_BUS_CNTL 0x0030
# define RADEON_BUS_MASTER_DIS (1 << 6) # define RADEON_BUS_MASTER_DIS (1 << 6)
# define RADEON_BUS_BIOS_DIS_ROM (1 << 12) # define RADEON_BUS_BIOS_DIS_ROM (1 << 12)
# define RS600_BUS_MASTER_DIS (1 << 14)
# define RS600_MSI_REARM (1 << 20) /* rs600/rs690/rs740 */
# define RADEON_BUS_RD_DISCARD_EN (1 << 24) # define RADEON_BUS_RD_DISCARD_EN (1 << 24)
# define RADEON_BUS_RD_ABORT_EN (1 << 25) # define RADEON_BUS_RD_ABORT_EN (1 << 25)
# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28) # define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
...@@ -297,6 +299,9 @@ ...@@ -297,6 +299,9 @@
# define RADEON_BUS_READ_BURST (1 << 30) # define RADEON_BUS_READ_BURST (1 << 30)
#define RADEON_BUS_CNTL1 0x0034 #define RADEON_BUS_CNTL1 0x0034
# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4) # define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
/* rv370/rv380, rv410, r423/r430/r480, r5xx */
#define RADEON_MSI_REARM_EN 0x0160
# define RV370_MSI_REARM_EN (1 << 0)
/* #define RADEON_PCIE_INDEX 0x0030 */ /* #define RADEON_PCIE_INDEX 0x0030 */
/* #define RADEON_PCIE_DATA 0x0034 */ /* #define RADEON_PCIE_DATA 0x0034 */
...@@ -3311,6 +3316,7 @@ ...@@ -3311,6 +3316,7 @@
#define RADEON_AIC_CNTL 0x01d0 #define RADEON_AIC_CNTL 0x01d0
# define RADEON_PCIGART_TRANSLATE_EN (1 << 0) # define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1) # define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
# define RS400_MSI_REARM (1 << 3) /* rs400/rs480 */
#define RADEON_AIC_LO_ADDR 0x01dc #define RADEON_AIC_LO_ADDR 0x01dc
#define RADEON_AIC_PT_BASE 0x01d8 #define RADEON_AIC_PT_BASE 0x01d8
#define RADEON_AIC_HI_ADDR 0x01e0 #define RADEON_AIC_HI_ADDR 0x01e0
......
...@@ -42,7 +42,7 @@ void radeon_test_moves(struct radeon_device *rdev) ...@@ -42,7 +42,7 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests = /* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffer) / test size * (Total GTT - IB pool - writeback page - ring buffer) / test size
*/ */
n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 - n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
rdev->cp.ring_size) / size; rdev->cp.ring_size) / size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
...@@ -102,7 +102,7 @@ void radeon_test_moves(struct radeon_device *rdev) ...@@ -102,7 +102,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup; goto out_cleanup;
} }
r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence); r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) { if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i); DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup; goto out_cleanup;
...@@ -145,7 +145,7 @@ void radeon_test_moves(struct radeon_device *rdev) ...@@ -145,7 +145,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup; goto out_cleanup;
} }
r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence); r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) { if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i); DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup; goto out_cleanup;
......
...@@ -295,6 +295,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, ...@@ -295,6 +295,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) { if (unlikely(r)) {
return r; return r;
} }
r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
if (unlikely(r)) {
goto out_cleanup;
}
r = ttm_tt_bind(bo->ttm, &tmp_mem); r = ttm_tt_bind(bo->ttm, &tmp_mem);
if (unlikely(r)) { if (unlikely(r)) {
goto out_cleanup; goto out_cleanup;
......
...@@ -418,6 +418,8 @@ int rs400_resume(struct radeon_device *rdev) ...@@ -418,6 +418,8 @@ int rs400_resume(struct radeon_device *rdev)
rs400_gart_disable(rdev); rs400_gart_disable(rdev);
/* Resume clock before doing reset */ /* Resume clock before doing reset */
r300_clock_startup(rdev); r300_clock_startup(rdev);
/* setup MC before calling post tables */
rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) { if (radeon_gpu_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
......
...@@ -242,7 +242,7 @@ void rs600_irq_disable(struct radeon_device *rdev) ...@@ -242,7 +242,7 @@ void rs600_irq_disable(struct radeon_device *rdev)
int rs600_irq_process(struct radeon_device *rdev) int rs600_irq_process(struct radeon_device *rdev)
{ {
uint32_t status; uint32_t status, msi_rearm;
uint32_t r500_disp_int; uint32_t r500_disp_int;
status = rs600_irq_ack(rdev, &r500_disp_int); status = rs600_irq_ack(rdev, &r500_disp_int);
...@@ -260,6 +260,22 @@ int rs600_irq_process(struct radeon_device *rdev) ...@@ -260,6 +260,22 @@ int rs600_irq_process(struct radeon_device *rdev)
drm_handle_vblank(rdev->ddev, 1); drm_handle_vblank(rdev->ddev, 1);
status = rs600_irq_ack(rdev, &r500_disp_int); status = rs600_irq_ack(rdev, &r500_disp_int);
} }
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
case CHIP_RS690:
case CHIP_RS740:
msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
WREG32(RADEON_BUS_CNTL, msi_rearm);
WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
break;
default:
msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
WREG32(RADEON_MSI_REARM_EN, msi_rearm);
WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
break;
}
}
return IRQ_HANDLED; return IRQ_HANDLED;
} }
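The rs600_irq_process hunk above re-arms MSI by writing the re-arm register twice: once with the bit cleared and once with it set, presumably so the hardware sees a low-to-high transition. The toy program below is not part of the patch; it just restates that toggle on a plain variable, with the RV370 bit name copied from the new register defines.

/* Illustration of the clear-then-set MSI re-arm toggle; "reg" is an
 * ordinary variable standing in for RADEON_MSI_REARM_EN. */
#include <stdio.h>

#define RV370_MSI_REARM_EN (1u << 0)

int main(void)
{
	unsigned reg = RV370_MSI_REARM_EN;              /* pretend readback */
	unsigned msi_rearm = reg & ~RV370_MSI_REARM_EN; /* preserve other bits, clear re-arm */

	reg = msi_rearm;                                /* first write: bit low  */
	reg = msi_rearm | RV370_MSI_REARM_EN;           /* second write: bit high */
	printf("final value 0x%x after low->high edge\n", reg);
	return 0;
}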
...@@ -472,6 +488,8 @@ int rs600_init(struct radeon_device *rdev) ...@@ -472,6 +488,8 @@ int rs600_init(struct radeon_device *rdev)
} }
/* Initialize clocks */ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */ /* Get vram informations */
rs600_vram_info(rdev); rs600_vram_info(rdev);
/* Initialize memory controller (also test AGP) */ /* Initialize memory controller (also test AGP) */
......
...@@ -706,6 +706,8 @@ int rs690_init(struct radeon_device *rdev) ...@@ -706,6 +706,8 @@ int rs690_init(struct radeon_device *rdev)
} }
/* Initialize clocks */ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */ /* Get vram informations */
rs690_vram_info(rdev); rs690_vram_info(rdev);
/* Initialize memory controller (also test AGP) */ /* Initialize memory controller (also test AGP) */
......
...@@ -137,6 +137,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) ...@@ -137,6 +137,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
void rv515_vga_render_disable(struct radeon_device *rdev) void rv515_vga_render_disable(struct radeon_device *rdev)
{ {
WREG32(R_000330_D1VGA_CONTROL, 0);
WREG32(R_000338_D2VGA_CONTROL, 0);
WREG32(R_000300_VGA_RENDER_CONTROL, WREG32(R_000300_VGA_RENDER_CONTROL,
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
} }
...@@ -585,6 +587,8 @@ int rv515_init(struct radeon_device *rdev) ...@@ -585,6 +587,8 @@ int rv515_init(struct radeon_device *rdev)
} }
/* Initialize clocks */ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */ /* Get vram informations */
rv515_vram_info(rdev); rv515_vram_info(rdev);
/* Initialize memory controller (also test AGP) */ /* Initialize memory controller (also test AGP) */
......
...@@ -529,11 +529,11 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -529,11 +529,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
if (rdev->family == CHIP_RV770) if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1); gb_tiling_config |= BANK_TILING(1);
else else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_SHIFT) >> NOOFBANK_MASK); gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
gb_tiling_config |= GROUP_SIZE(0); gb_tiling_config |= GROUP_SIZE(0);
if (((mc_arb_ramcfg & NOOFROWS_MASK) & NOOFROWS_SHIFT) > 3) { if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3); gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3); gb_tiling_config |= SAMPLE_SPLIT(3);
} else { } else {
...@@ -579,14 +579,14 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -579,14 +579,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)
/* set HW defaults for 3D engine */ /* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
ROQ_IB2_START(0x2b))); ROQ_IB2_START(0x2b)));
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
SYNC_GRADIENT | SYNC_GRADIENT |
SYNC_WALKER | SYNC_WALKER |
SYNC_ALIGNER)); SYNC_ALIGNER));
sx_debug_1 = RREG32(SX_DEBUG_1); sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
...@@ -598,9 +598,9 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -598,9 +598,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(SMX_DC_CTL0, smx_dc_ctl0); WREG32(SMX_DC_CTL0, smx_dc_ctl0);
WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
GS_FLUSH_CTL(4) | GS_FLUSH_CTL(4) |
ACK_FLUSH_CTL(3) | ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL)); SYNC_FLUSH_CTL));
if (rdev->family == CHIP_RV770) if (rdev->family == CHIP_RV770)
WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
...@@ -611,12 +611,12 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -611,12 +611,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
} }
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) | POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1))); SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) | SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize))); SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
WREG32(PA_SC_MULTI_CHIP_CNTL, 0); WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
...@@ -774,14 +774,36 @@ int rv770_mc_init(struct radeon_device *rdev) ...@@ -774,14 +774,36 @@ int rv770_mc_init(struct radeon_device *rdev)
{ {
fixed20_12 a; fixed20_12 a;
u32 tmp; u32 tmp;
int chansize, numchan;
int r; int r;
/* Get VRAM informations */ /* Get VRAM informations */
/* FIXME: Don't know how to determine vram width, need to check
* vram_width usage
*/
rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true; rdev->mc.vram_is_ddr = true;
tmp = RREG32(MC_ARB_RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
chansize = 16;
} else if (tmp & CHANSIZE_MASK) {
chansize = 64;
} else {
chansize = 32;
}
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */ /* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
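The rv770_mc_init hunk above derives the VRAM bus width as numchan * chansize, decoding the channel count from bits 13:12 of MC_SHARED_CHMAP using the NOOFCHAN defines added later in this series. The sketch below is a standalone check with example register values, not driver code.

/* Worked example of the channel decode; mc_shared_chmap and chansize are
 * example values, the mask/shift match the defines added in this series. */
#include <stdio.h>

#define NOOFCHAN_MASK  0x00003000u
#define NOOFCHAN_SHIFT 12

int main(void)
{
	unsigned mc_shared_chmap = 0x00001000; /* example readback: field value 1 */
	unsigned chansize = 32;                /* example channel size from MC_ARB_RAMCFG */
	unsigned numchan;

	switch ((mc_shared_chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 1:  numchan = 2; break;
	case 2:  numchan = 4; break;
	case 3:  numchan = 8; break;
	default: numchan = 1; break;
	}
	printf("vram_width = %u bits\n", numchan * chansize); /* 2 * 32 = 64 */
	return 0;
}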
...@@ -961,10 +983,13 @@ int rv770_init(struct radeon_device *rdev) ...@@ -961,10 +983,13 @@ int rv770_init(struct radeon_device *rdev)
r600_scratch_init(rdev); r600_scratch_init(rdev);
/* Initialize surface registers */ /* Initialize surface registers */
radeon_surface_init(rdev); radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev); radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev); r = radeon_clocks_init(rdev);
if (r) if (r)
return r; return r;
/* Initialize power management */
radeon_pm_init(rdev);
/* Fence driver */ /* Fence driver */
r = radeon_fence_driver_init(rdev); r = radeon_fence_driver_init(rdev);
if (r) if (r)
......
...@@ -129,6 +129,10 @@ ...@@ -129,6 +129,10 @@
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C #define HDP_TILING_CONFIG 0x2F3C
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
#define MC_ARB_RAMCFG 0x2760 #define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0 #define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003 #define NOOFBANK_MASK 0x00000003
...@@ -142,6 +146,7 @@ ...@@ -142,6 +146,7 @@
#define CHANSIZE_MASK 0x00000100 #define CHANSIZE_MASK 0x00000100
#define BURSTLENGTH_SHIFT 9 #define BURSTLENGTH_SHIFT 9
#define BURSTLENGTH_MASK 0x00000200 #define BURSTLENGTH_MASK 0x00000200
#define CHANSIZE_OVERRIDE (1 << 11)
#define MC_VM_AGP_TOP 0x2028 #define MC_VM_AGP_TOP 0x2028
#define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030 #define MC_VM_AGP_BASE 0x2030
......
...@@ -279,6 +279,7 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) ...@@ -279,6 +279,7 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
return ttm_tt_set_caching(ttm, state); return ttm_tt_set_caching(ttm, state);
} }
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{ {
......