Commit 7e76c5cf authored by Dave Airlie

Merge branch 'drm-radeon-next' of ../drm-radeon-next into drm-core-next

* 'drm-radeon-next' of ../drm-radeon-next:
  drm/radeon/kms: improve pflip precision on r1xx-r4xx
  drm/kms/radeon: Use high precision timestamps for pageflip completion events.
  drm/kms/radeon: Reorder vblank and pageflip interrupt handling.
  drm/radeon/kms: add pageflip ioctl support (v3)
  drm/kms/radeon: Add support for precise vblank timestamping.
parents a9979d60 acb32506
@@ -40,6 +40,61 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
u32 tmp;
/* make sure flip is at vb rather than hb */
tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
/* set pageflip to happen anywhere in vblank interval */
WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
/* disable the pflip int */
radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* update the scanout addresses */
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(crtc_base));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(crtc_base));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
/* Wait for update_pending to go high. */
while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* Return current update_pending status: */
return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
/* get temperature in millidegrees */
u32 evergreen_get_temp(struct radeon_device *rdev)
{
@@ -2060,6 +2115,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
+ u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2085,27 +2141,33 @@ int evergreen_irq_set(struct radeon_device *rdev)
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[2]) {
+ if (rdev->irq.crtc_vblank_int[2] ||
+ rdev->irq.pflip[2]) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[3]) {
+ if (rdev->irq.crtc_vblank_int[3] ||
+ rdev->irq.pflip[3]) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[4]) {
+ if (rdev->irq.crtc_vblank_int[4] ||
+ rdev->irq.pflip[4]) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[5]) {
+ if (rdev->irq.crtc_vblank_int[5] ||
+ rdev->irq.pflip[5]) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -2148,6 +2210,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2158,79 +2227,92 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
- static inline void evergreen_irq_ack(struct radeon_device *rdev,
- u32 *disp_int,
- u32 *disp_int_cont,
- u32 *disp_int_cont2,
- u32 *disp_int_cont3,
- u32 *disp_int_cont4,
- u32 *disp_int_cont5)
+ static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
- *disp_int = RREG32(DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
- *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
- *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
- *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
- if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+ rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD1_INT_CONTROL, tmp);
}
- if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD2_INT_CONTROL, tmp);
}
- if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD3_INT_CONTROL, tmp);
}
- if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
- if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
- if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -2239,14 +2321,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev,
void evergreen_irq_disable(struct radeon_device *rdev)
{
- u32 disp_int, disp_int_cont, disp_int_cont2;
- u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
- evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
- &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_irq_ack(rdev);
evergreen_disable_interrupt_state(rdev);
}
@@ -2286,8 +2364,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
u32 ring_index;
- u32 disp_int, disp_int_cont, disp_int_cont2;
- u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
unsigned long flags;
bool queue_hotplug = false;
@@ -2308,8 +2384,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
restart_ih:
/* display interrupts */
- evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
- &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
@@ -2322,17 +2397,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (disp_int & LB_D1_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D1_VLINE_INTERRUPT) {
- disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
@@ -2344,17 +2423,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D2 vline */
- if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
@@ -2366,17 +2449,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[2])
+ radeon_crtc_handle_flip(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
break;
case 1: /* D3 vline */
- if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
DRM_DEBUG("IH: D3 vline\n");
}
break;
@@ -2388,17 +2475,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[3])
+ radeon_crtc_handle_flip(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
break;
case 1: /* D4 vline */
- if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
DRM_DEBUG("IH: D4 vline\n");
}
break;
@@ -2410,17 +2501,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[4])
+ radeon_crtc_handle_flip(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
break;
case 1: /* D5 vline */
- if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
DRM_DEBUG("IH: D5 vline\n");
}
break;
@@ -2432,17 +2527,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[5])
+ radeon_crtc_handle_flip(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
break;
case 1: /* D6 vline */
- if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
DRM_DEBUG("IH: D6 vline\n");
}
break;
@@ -2454,43 +2553,43 @@ int evergreen_irq_process(struct radeon_device *rdev)
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (disp_int & DC_HPD1_INTERRUPT) {
- disp_int &= ~DC_HPD1_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
- if (disp_int_cont & DC_HPD2_INTERRUPT) {
- disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 2:
- if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 3:
- if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
- disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 4:
- if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
- disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 5:
- if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
- disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
...
@@ -105,6 +105,11 @@
#define EVERGREEN_GRPH_Y_START 0x6830
#define EVERGREEN_GRPH_X_END 0x6834
#define EVERGREEN_GRPH_Y_END 0x6838
+ #define EVERGREEN_GRPH_UPDATE 0x6844
+ # define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+ # define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
+ #define EVERGREEN_GRPH_FLIP_CONTROL 0x6848
+ # define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
#define EVERGREEN_CUR_CONTROL 0x6998
@@ -178,6 +183,7 @@
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_STATUS 0x6e8c
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+ #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
...
@@ -68,6 +68,56 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
u32 tmp;
/* make sure flip is at vb rather than hb */
tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
/* make sure pending bit is asserted */
tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
/* set pageflip to happen as late as possible in the vblank interval.
* same field for crtc1/2
*/
tmp = RREG32(RADEON_CRTC_GEN_CNTL);
tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
WREG32(RADEON_CRTC_GEN_CNTL, tmp);
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
{
/* disable the pflip int */
radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Wait for update_pending to go high. */
while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Return current update_pending status: */
return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
@@ -526,10 +576,12 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
@@ -600,14 +652,22 @@ int r100_irq_process(struct radeon_device *rdev)
}
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
...
@@ -355,6 +355,8 @@
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+ #define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
@@ -409,8 +411,10 @@
#define AVIVO_D1GRPH_X_END 0x6134
#define AVIVO_D1GRPH_Y_END 0x6138
#define AVIVO_D1GRPH_UPDATE 0x6144
+ # define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2)
# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
+ # define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
#define AVIVO_D1CUR_CONTROL 0x6400
# define AVIVO_D1CURSOR_EN (1 << 0)
...
@@ -2863,6 +2863,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2987,6 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi1, hdmi2;
+ u32 d1grph = 0, d2grph = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3023,11 +3026,13 @@ int r600_irq_set(struct radeon_device *rdev)
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
@@ -3070,6 +3075,8 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
@@ -3092,32 +3099,35 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
- static inline void r600_irq_ack(struct radeon_device *rdev,
- u32 *disp_int,
- u32 *disp_int_cont,
- u32 *disp_int_cont2)
+ static inline void r600_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
if (ASIC_IS_DCE3(rdev)) {
- *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
} else {
- *disp_int = RREG32(DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = 0;
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
}
+ rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+ if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
- if (*disp_int & LB_D1_VBLANK_INTERRUPT)
if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
- if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
- if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
- if (*disp_int & LB_D2_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
- if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3128,7 +3138,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
}
- if (*disp_int & DC_HPD2_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3139,7 +3149,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
}
- if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3150,18 +3160,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
}
}
- if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
if (ASIC_IS_DCE32(rdev)) {
- if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
- if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3183,12 +3193,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
void r600_irq_disable(struct radeon_device *rdev)
{
- u32 disp_int, disp_int_cont, disp_int_cont2;
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
- r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_irq_ack(rdev);
r600_disable_interrupt_state(rdev);
}
@@ -3251,7 +3259,7 @@ int r600_irq_process(struct radeon_device *rdev)
u32 wptr = r600_get_ih_wptr(rdev);
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
- u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+ u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
@@ -3272,7 +3280,7 @@ int r600_irq_process(struct radeon_device *rdev)
restart_ih:
/* display interrupts */
- r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
@@ -3285,17 +3293,21 @@ int r600_irq_process(struct radeon_device *rdev)
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (disp_int & LB_D1_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D1_VLINE_INTERRUPT) {
- disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
@@ -3307,17 +3319,21 @@ int r600_irq_process(struct radeon_device *rdev)
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (disp_int & LB_D2_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D2_VLINE_INTERRUPT) {
- disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
@@ -3329,43 +3345,43 @@ int r600_irq_process(struct radeon_device *rdev)
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (disp_int & DC_HPD1_INTERRUPT) {
- disp_int &= ~DC_HPD1_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
- if (disp_int & DC_HPD2_INTERRUPT) {
- disp_int &= ~DC_HPD2_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 4:
- if (disp_int_cont & DC_HPD3_INTERRUPT) {
- disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 5:
- if (disp_int_cont & DC_HPD4_INTERRUPT) {
- disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 10:
- if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 12:
- if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
...
@@ -728,6 +728,15 @@
/* DCE 3.2 */
# define DC_HPDx_EN (1 << 28)
+ #define D1GRPH_INTERRUPT_STATUS 0x6158
+ #define D2GRPH_INTERRUPT_STATUS 0x6958
+ # define DxGRPH_PFLIP_INT_OCCURRED (1 << 0)
+ # define DxGRPH_PFLIP_INT_CLEAR (1 << 8)
+ #define D1GRPH_INTERRUPT_CONTROL 0x615c
+ #define D2GRPH_INTERRUPT_CONTROL 0x695c
+ # define DxGRPH_PFLIP_INT_MASK (1 << 0)
+ # define DxGRPH_PFLIP_INT_TYPE (1 << 8)
/*
* PM4
*/
...
@@ -377,11 +377,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* IRQS.
*/
struct radeon_unpin_work {
struct work_struct work;
struct radeon_device *rdev;
int crtc_id;
struct radeon_fence *fence;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
u64 new_crtc_base;
};
struct r500_irq_stat_regs {
u32 disp_int;
};
struct r600_irq_stat_regs {
u32 disp_int;
u32 disp_int_cont;
u32 disp_int_cont2;
u32 d1grph_int;
u32 d2grph_int;
};
struct evergreen_irq_stat_regs {
u32 disp_int;
u32 disp_int_cont;
u32 disp_int_cont2;
u32 disp_int_cont3;
u32 disp_int_cont4;
u32 disp_int_cont5;
u32 d1grph_int;
u32 d2grph_int;
u32 d3grph_int;
u32 d4grph_int;
u32 d5grph_int;
u32 d6grph_int;
};
union radeon_irq_stat_regs {
struct r500_irq_stat_regs r500;
struct r600_irq_stat_regs r600;
struct evergreen_irq_stat_regs evergreen;
};
struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[6];
+ bool pflip[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
@@ -392,12 +437,17 @@ struct radeon_irq {
bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
+ union radeon_irq_stat_regs stat_regs;
+ spinlock_t pflip_lock[6];
+ int pflip_refcount[6];
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
* CP & ring.
@@ -881,6 +931,10 @@ struct radeon_asic {
void (*pm_finish)(struct radeon_device *rdev);
void (*pm_init_profile)(struct radeon_device *rdev);
void (*pm_get_dynpm_state)(struct radeon_device *rdev);
+ /* pageflipping */
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
};
/*
@@ -1344,6 +1398,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+ #define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+ #define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+ #define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
/* Common functions */
/* AGP */
...
@@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r200_asic = {
@@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic = {
@@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic_pcie = {
@@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r420_asic = {
@@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs400_asic = {
@@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs600_asic = {
@@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs690_asic = {
@@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv515_asic = {
@@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r520_asic = {
@@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r600_asic = {
@@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs780_asic = {
@@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &rs780_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv770_asic = { static struct radeon_asic rv770_asic = {
...@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = { ...@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = {
.pm_finish = &rs600_pm_finish, .pm_finish = &rs600_pm_finish,
.pm_init_profile = &r600_pm_init_profile, .pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state, .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
.pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rv770_page_flip,
.post_page_flip = &rs600_post_page_flip,
}; };
static struct radeon_asic evergreen_asic = { static struct radeon_asic evergreen_asic = {
...@@ -749,6 +788,9 @@ static struct radeon_asic evergreen_asic = { ...@@ -749,6 +788,9 @@ static struct radeon_asic evergreen_asic = {
.pm_finish = &evergreen_pm_finish, .pm_finish = &evergreen_pm_finish,
.pm_init_profile = &r600_pm_init_profile, .pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state, .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
}; };
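The three new callbacks in each ASIC table are reached through the driver's usual per-ASIC indirection. A minimal sketch of that dispatch, following the existing rdev->asic wrapper convention in radeon.h; the exact macro text is not part of this diff, so treat the names as assumptions:
/* Hedged sketch: dispatch wrappers for the new pageflip hooks, mirroring
 * how the other rdev->asic callbacks are wrapped. Names are assumed. */
#define radeon_pre_page_flip(rdev, crtc) \
		(rdev)->asic->pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) \
		(rdev)->asic->page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) \
		(rdev)->asic->post_page_flip((rdev), (crtc))
These are the wrappers radeon_crtc_handle_flip() and radeon_crtc_page_flip() call further down, so the same display code covers r1xx through evergreen.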
int radeon_asic_init(struct radeon_device *rdev) int radeon_asic_init(struct radeon_device *rdev)
......
...@@ -130,6 +130,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev); ...@@ -130,6 +130,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
extern void r100_pm_finish(struct radeon_device *rdev); extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev); extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
/* /*
* r200,rv250,rs300,rv280 * r200,rv250,rs300,rv280
...@@ -205,6 +208,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev, ...@@ -205,6 +208,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
extern void rs600_pm_misc(struct radeon_device *rdev); extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev); extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev); extern void rs600_pm_finish(struct radeon_device *rdev);
extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
/* /*
* rs690,rs740 * rs690,rs740
...@@ -287,6 +293,7 @@ void rv770_fini(struct radeon_device *rdev); ...@@ -287,6 +293,7 @@ void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev); int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev); int rv770_resume(struct radeon_device *rdev);
extern void rv770_pm_misc(struct radeon_device *rdev); extern void rv770_pm_misc(struct radeon_device *rdev);
extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
/* /*
* evergreen * evergreen
...@@ -314,5 +321,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); ...@@ -314,5 +321,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev); extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev); extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev); extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
#endif #endif
...@@ -183,12 +183,272 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc) ...@@ -183,12 +183,272 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
kfree(radeon_crtc); kfree(radeon_crtc);
} }
/*
* Handle unpin events outside the interrupt handler proper.
*/
static void radeon_unpin_work_func(struct work_struct *__work)
{
struct radeon_unpin_work *work =
container_of(__work, struct radeon_unpin_work, work);
int r;
/* unpin of the old buffer */
r = radeon_bo_reserve(work->old_rbo, false);
if (likely(r == 0)) {
r = radeon_bo_unpin(work->old_rbo);
if (unlikely(r != 0)) {
DRM_ERROR("failed to unpin buffer after flip\n");
}
radeon_bo_unreserve(work->old_rbo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
kfree(work);
}
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
struct radeon_unpin_work *work;
struct drm_pending_vblank_event *e;
struct timeval now;
unsigned long flags;
u32 update_pending;
int vpos, hpos;
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->unpin_work;
if (work == NULL ||
!radeon_fence_signaled(work->fence)) {
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
/* New pageflip, or just completion of a previous one? */
if (!radeon_crtc->deferred_flip_completion) {
/* do the flip (mmio) */
update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
} else {
/* This is just a completion of a flip queued in crtc
* at last invocation. Make sure we go directly to
* completion routine.
*/
update_pending = 0;
radeon_crtc->deferred_flip_completion = 0;
}
/* Has the pageflip already completed in crtc, or is it certain
* to complete in this vblank?
*/
if (update_pending &&
(DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
&vpos, &hpos)) &&
(vpos >=0) &&
(vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
/* crtc didn't flip in this target vblank interval,
* but flip is pending in crtc. It will complete it
* in next vblank interval, so complete the flip at
* next vblank irq.
*/
radeon_crtc->deferred_flip_completion = 1;
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
/* Pageflip (will be) certainly completed in this vblank. Clean up. */
radeon_crtc->unpin_work = NULL;
/* wakeup userspace */
if (work->event) {
e = work->event;
e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
radeon_fence_unref(&work->fence);
radeon_post_page_flip(work->rdev, work->crtc_id);
schedule_work(&work->work);
}
static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *old_radeon_fb;
struct radeon_framebuffer *new_radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
struct radeon_fence *fence;
struct radeon_unpin_work *work;
unsigned long flags;
u32 tiling_flags, pitch_pixels;
u64 base;
int r;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
r = radeon_fence_create(rdev, &fence);
if (unlikely(r != 0)) {
kfree(work);
DRM_ERROR("flip queue: failed to create fence.\n");
return -ENOMEM;
}
work->event = event;
work->rdev = rdev;
work->crtc_id = radeon_crtc->crtc_id;
work->fence = radeon_fence_ref(fence);
old_radeon_fb = to_radeon_framebuffer(crtc->fb);
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
rbo = obj->driver_private;
work->old_rbo = rbo;
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (radeon_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
radeon_fence_unref(&fence);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
}
radeon_crtc->unpin_work = work;
radeon_crtc->deferred_flip_completion = 0;
spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
obj = new_radeon_fb->obj;
rbo = obj->driver_private;
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
goto pflip_cleanup;
}
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto pflip_cleanup;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
base -= radeon_crtc->legacy_display_base_addr;
pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev)) {
base &= ~0x7ff;
} else {
int byteshift = fb->bits_per_pixel >> 4;
int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
}
} else {
int offset = crtc->y * pitch_pixels + crtc->x;
switch (fb->bits_per_pixel) {
case 8:
default:
offset *= 1;
break;
case 15:
case 16:
offset *= 2;
break;
case 24:
offset *= 3;
break;
case 32:
offset *= 4;
break;
}
base += offset;
}
base &= ~7;
}
spin_lock_irqsave(&dev->event_lock, flags);
work->new_crtc_base = base;
spin_unlock_irqrestore(&dev->event_lock, flags);
/* update crtc fb */
crtc->fb = fb;
r = drm_vblank_get(dev, radeon_crtc->crtc_id);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
goto pflip_cleanup1;
}
/* 32 ought to cover us */
r = radeon_ring_lock(rdev, 32);
if (r) {
DRM_ERROR("failed to lock the ring before flip\n");
goto pflip_cleanup2;
}
/* emit the fence */
radeon_fence_emit(rdev, fence);
/* set the proper interrupt */
radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
/* fire the ring */
radeon_ring_unlock_commit(rdev);
return 0;
pflip_cleanup2:
drm_vblank_put(dev, radeon_crtc->crtc_id);
pflip_cleanup1:
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto pflip_cleanup;
}
r = radeon_bo_unpin(rbo);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
r = -EINVAL;
DRM_ERROR("failed to unpin new rbo in error path\n");
goto pflip_cleanup;
}
radeon_bo_unreserve(rbo);
pflip_cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
radeon_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
radeon_fence_unref(&fence);
kfree(work);
return r;
}
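For context, radeon_crtc_page_flip() is the driver side of the standard DRM page-flip ioctl, which userspace reaches through libdrm. A minimal, hedged usage sketch; crtc_id and fb_id are placeholders assumed to come from an earlier modeset:
/* Hedged userspace sketch using the standard libdrm API. */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void flip_done(int fd, unsigned int seq, unsigned int sec,
		      unsigned int usec, void *data)
{
	/* sec/usec carry the vblank timestamp of the completed flip */
}

static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = flip_done,
	};
	int ret = drmModePageFlip(fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret)
		return ret;
	/* with a blocking fd this waits for the completion event */
	return drmHandleEvent(fd, &evctx);
}
The DRM_MODE_PAGE_FLIP_EVENT flag is what makes the driver queue the drm_pending_vblank_event that radeon_crtc_handle_flip() completes above.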
static const struct drm_crtc_funcs radeon_crtc_funcs = { static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set = radeon_crtc_cursor_set, .cursor_set = radeon_crtc_cursor_set,
.cursor_move = radeon_crtc_cursor_move, .cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set, .gamma_set = radeon_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config, .set_config = drm_crtc_helper_set_config,
.destroy = radeon_crtc_destroy, .destroy = radeon_crtc_destroy,
.page_flip = radeon_crtc_page_flip,
}; };
static void radeon_crtc_init(struct drm_device *dev, int index) static void radeon_crtc_init(struct drm_device *dev, int index)
...@@ -1019,7 +1279,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, ...@@ -1019,7 +1279,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
/* /*
* Retrieve current video scanout position of crtc on a given gpu. * Retrieve current video scanout position of crtc on a given gpu.
* *
* \param rdev Device to query. * \param dev Device to query.
* \param crtc Crtc to query. * \param crtc Crtc to query.
* \param *vpos Location where vertical scanout position should be stored. * \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go. * \param *hpos Location where horizontal scanout position should go.
...@@ -1031,72 +1291,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, ...@@ -1031,72 +1291,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* *
* \return Flags, or'ed together as follows: * \return Flags, or'ed together as follows:
* *
 * RADEON_SCANOUTPOS_VALID = Query successful. * DRM_SCANOUTPOS_VALID = Query successful.
* RADEON_SCANOUTPOS_INVBL = Inside vblank. * DRM_SCANOUTPOS_INVBL = Inside vblank.
* RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
* this flag means that returned position may be offset by a constant but * this flag means that returned position may be offset by a constant but
* unknown small number of scanlines wrt. real scanout position. * unknown small number of scanlines wrt. real scanout position.
* *
*/ */
int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos) int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
{ {
u32 stat_crtc = 0, vbl = 0, position = 0; u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0; int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true; bool in_vbl = true;
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev)) { if (ASIC_IS_DCE4(rdev)) {
if (crtc == 0) { if (crtc == 0) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC0_REGISTER_OFFSET); EVERGREEN_CRTC0_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC0_REGISTER_OFFSET); EVERGREEN_CRTC0_REGISTER_OFFSET);
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
if (crtc == 1) { if (crtc == 1) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC1_REGISTER_OFFSET); EVERGREEN_CRTC1_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC1_REGISTER_OFFSET); EVERGREEN_CRTC1_REGISTER_OFFSET);
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
if (crtc == 2) { if (crtc == 2) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC2_REGISTER_OFFSET); EVERGREEN_CRTC2_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC2_REGISTER_OFFSET); EVERGREEN_CRTC2_REGISTER_OFFSET);
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
if (crtc == 3) { if (crtc == 3) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC3_REGISTER_OFFSET); EVERGREEN_CRTC3_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC3_REGISTER_OFFSET); EVERGREEN_CRTC3_REGISTER_OFFSET);
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
if (crtc == 4) { if (crtc == 4) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC4_REGISTER_OFFSET); EVERGREEN_CRTC4_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC4_REGISTER_OFFSET); EVERGREEN_CRTC4_REGISTER_OFFSET);
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
if (crtc == 5) { if (crtc == 5) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC5_REGISTER_OFFSET); EVERGREEN_CRTC5_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC5_REGISTER_OFFSET); EVERGREEN_CRTC5_REGISTER_OFFSET);
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
} else if (ASIC_IS_AVIVO(rdev)) { } else if (ASIC_IS_AVIVO(rdev)) {
if (crtc == 0) { if (crtc == 0) {
vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
if (crtc == 1) { if (crtc == 1) {
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
} else { } else {
/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
...@@ -1112,7 +1374,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, ...@@ -1112,7 +1374,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
if (!(stat_crtc & 1)) if (!(stat_crtc & 1))
in_vbl = false; in_vbl = false;
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
if (crtc == 1) { if (crtc == 1) {
vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
...@@ -1122,7 +1384,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, ...@@ -1122,7 +1384,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
if (!(stat_crtc & 1)) if (!(stat_crtc & 1))
in_vbl = false; in_vbl = false;
ret |= RADEON_SCANOUTPOS_VALID; ret |= DRM_SCANOUTPOS_VALID;
} }
} }
...@@ -1133,13 +1395,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, ...@@ -1133,13 +1395,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* Valid vblank area boundaries from gpu retrieved? */ /* Valid vblank area boundaries from gpu retrieved? */
if (vbl > 0) { if (vbl > 0) {
/* Yes: Decode. */ /* Yes: Decode. */
ret |= RADEON_SCANOUTPOS_ACCURATE; ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff; vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff; vbl_end = (vbl >> 16) & 0x1fff;
} }
else { else {
/* No: Fake something reasonable which gives at least ok results. */ /* No: Fake something reasonable which gives at least ok results. */
vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay; vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
vbl_end = 0; vbl_end = 0;
} }
...@@ -1155,7 +1417,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, ...@@ -1155,7 +1417,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */ /* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) { if (in_vbl && (*vpos >= vbl_start)) {
vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal; vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
*vpos = *vpos - vtotal; *vpos = *vpos - vtotal;
} }
...@@ -1164,7 +1426,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, ...@@ -1164,7 +1426,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* In vblank? */ /* In vblank? */
if (in_vbl) if (in_vbl)
ret |= RADEON_SCANOUTPOS_INVBL; ret |= DRM_SCANOUTPOS_INVBL;
return ret; return ret;
} }
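The signed vpos returned here, negative once the crtc has entered vblank, is what lets the DRM core correct a timestamp taken "now" back to the start of active scanout. A rough, hedged illustration of that correction, assuming a constant pixel clock taken from the hwmode; the function and parameter names are illustrative only:
/* Hedged illustration: distance from active scanout, in nanoseconds.
 * vpos is the corrected scanline (negative inside vblank), hpos the
 * horizontal position, htotal and dotclock_khz come from the mode. */
static long scanoutpos_to_offset_ns(int vpos, int hpos,
				    int htotal, int dotclock_khz)
{
	long pixel_ns = 1000000L / dotclock_khz;	/* ~ns per pixel */
	long pixels = (long)vpos * htotal + hpos;

	return pixels * pixel_ns;	/* negative while inside vblank */
}
This is, in spirit, the correction drm_calc_vbltimestamp_from_scanoutpos() applies when radeon_get_vblank_timestamp_kms() (added below) hands it this query function.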
...@@ -48,9 +48,10 @@ ...@@ -48,9 +48,10 @@
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
* 2.8.0 - pageflip support
*/ */
#define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 7 #define KMS_DRIVER_MINOR 8
#define KMS_DRIVER_PATCHLEVEL 0 #define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev); int radeon_driver_unload_kms(struct drm_device *dev);
...@@ -66,6 +67,10 @@ int radeon_resume_kms(struct drm_device *dev); ...@@ -66,6 +67,10 @@ int radeon_resume_kms(struct drm_device *dev);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
int *max_error,
struct timeval *vblank_time,
unsigned flags);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev); void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev); int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev); void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
...@@ -74,6 +79,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, ...@@ -74,6 +79,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj); int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj); void radeon_gem_object_free(struct drm_gem_object *obj);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[]; extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl; extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma); int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
...@@ -277,6 +284,8 @@ static struct drm_driver kms_driver = { ...@@ -277,6 +284,8 @@ static struct drm_driver kms_driver = {
.get_vblank_counter = radeon_get_vblank_counter_kms, .get_vblank_counter = radeon_get_vblank_counter_kms,
.enable_vblank = radeon_enable_vblank_kms, .enable_vblank = radeon_enable_vblank_kms,
.disable_vblank = radeon_disable_vblank_kms, .disable_vblank = radeon_disable_vblank_kms,
.get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
.get_scanout_position = radeon_get_crtc_scanoutpos,
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
.debugfs_init = radeon_debugfs_init, .debugfs_init = radeon_debugfs_init,
.debugfs_cleanup = radeon_debugfs_cleanup, .debugfs_cleanup = radeon_debugfs_cleanup,
......
...@@ -71,8 +71,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) ...@@ -71,8 +71,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
rdev->irq.gui_idle = false; rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++) for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false; rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false; rdev->irq.hpd[i] = false;
rdev->irq.pflip[i] = false;
}
radeon_irq_set(rdev); radeon_irq_set(rdev);
/* Clear bits */ /* Clear bits */
radeon_irq_process(rdev); radeon_irq_process(rdev);
...@@ -101,8 +103,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) ...@@ -101,8 +103,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
rdev->irq.gui_idle = false; rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++) for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false; rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false; rdev->irq.hpd[i] = false;
rdev->irq.pflip[i] = false;
}
radeon_irq_set(rdev); radeon_irq_set(rdev);
} }
...@@ -175,3 +179,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev) ...@@ -175,3 +179,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
} }
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
rdev->irq.pflip[crtc] = true;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
}
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
rdev->irq.pflip[crtc] = false;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
}
...@@ -277,6 +277,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) ...@@ -277,6 +277,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
radeon_irq_set(rdev); radeon_irq_set(rdev);
} }
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
{
struct drm_crtc *drmcrtc;
struct radeon_device *rdev = dev->dev_private;
if (crtc < 0 || crtc >= dev->num_crtcs) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
/* Get associated drm_crtc: */
drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
vblank_time, flags,
drmcrtc);
}
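With .get_vblank_timestamp wired to the helper above, the timestamps delivered in vblank and page-flip events are derived from the scanout position instead of being sampled at interrupt time. A minimal, hedged userspace sketch that observes one of them through the classic vblank-wait ioctl; fd is assumed to be an open DRM device node:
/* Hedged sketch: wait for the next vblank on the first crtc and print
 * the reply timestamp, which now comes from the precise path. */
#include <stdio.h>
#include <xf86drm.h>

static int wait_one_vblank(int fd)
{
	drmVBlank vbl = {
		.request = {
			.type = DRM_VBLANK_RELATIVE,
			.sequence = 1,
		},
	};
	int ret = drmWaitVBlank(fd, &vbl);

	if (ret == 0)
		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
		       vbl.reply.tval_sec, vbl.reply.tval_usec);
	return ret;
}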
/* /*
* IOCTL. * IOCTL.
......
...@@ -277,6 +277,9 @@ struct radeon_crtc { ...@@ -277,6 +277,9 @@ struct radeon_crtc {
fixed20_12 hsc; fixed20_12 hsc;
struct drm_display_mode native_mode; struct drm_display_mode native_mode;
int pll_id; int pll_id;
/* page flipping */
struct radeon_unpin_work *unpin_work;
int deferred_flip_completion;
}; };
struct radeon_encoder_primary_dac { struct radeon_encoder_primary_dac {
...@@ -442,10 +445,6 @@ struct radeon_framebuffer { ...@@ -442,10 +445,6 @@ struct radeon_framebuffer {
struct drm_gem_object *obj; struct drm_gem_object *obj;
}; };
/* radeon_get_crtc_scanoutpos() return flags */
#define RADEON_SCANOUTPOS_VALID (1 << 0)
#define RADEON_SCANOUTPOS_INVBL (1 << 1)
#define RADEON_SCANOUTPOS_ACCURATE (1 << 2)
extern enum radeon_tv_std extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev); radeon_combios_get_tv_info(struct radeon_device *rdev);
...@@ -562,7 +561,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -562,7 +561,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y); int x, int y);
extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid * extern struct edid *
...@@ -662,4 +662,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev); ...@@ -662,4 +662,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev);
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
void radeon_fb_output_poll_changed(struct radeon_device *rdev); void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
#endif #endif
...@@ -720,9 +720,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) ...@@ -720,9 +720,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/ */
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) { if (rdev->pm.active_crtcs & (1 << crtc)) {
vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos); vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
if ((vbl_status & RADEON_SCANOUTPOS_VALID) && if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
!(vbl_status & RADEON_SCANOUTPOS_INVBL)) !(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false; in_vbl = false;
} }
} }
......
...@@ -422,6 +422,7 @@ ...@@ -422,6 +422,7 @@
# define RADEON_CRTC_CSYNC_EN (1 << 4) # define RADEON_CRTC_CSYNC_EN (1 << 4)
# define RADEON_CRTC_ICON_EN (1 << 15) # define RADEON_CRTC_ICON_EN (1 << 15)
# define RADEON_CRTC_CUR_EN (1 << 16) # define RADEON_CRTC_CUR_EN (1 << 16)
# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17)
# define RADEON_CRTC_CUR_MODE_MASK (7 << 20) # define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
# define RADEON_CRTC_CUR_MODE_SHIFT 20 # define RADEON_CRTC_CUR_MODE_SHIFT 20
# define RADEON_CRTC_CUR_MODE_MONO 0 # define RADEON_CRTC_CUR_MODE_MONO 0
...@@ -509,6 +510,8 @@ ...@@ -509,6 +510,8 @@
# define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_TILE_EN (1 << 15)
# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) # define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28)
# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29)
#define R300_CRTC_TILE_X0_Y0 0x0350 #define R300_CRTC_TILE_X0_Y0 0x0350
#define R300_CRTC2_TILE_X0_Y0 0x0358 #define R300_CRTC2_TILE_X0_Y0 0x0358
......
...@@ -46,6 +46,56 @@ ...@@ -46,6 +46,56 @@
void rs600_gpu_init(struct radeon_device *rdev); void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev);
void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
u32 tmp;
/* make sure flip is at vb rather than hb */
tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
/* set pageflip to happen anywhere in vblank interval */
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
/* disable the pflip int */
radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* update the scanout addresses */
WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
/* Wait for update_pending to go high. */
while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* Return current update_pending status: */
return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
void rs600_pm_misc(struct radeon_device *rdev) void rs600_pm_misc(struct radeon_device *rdev)
{ {
int requested_index = rdev->pm.requested_power_state_index; int requested_index = rdev->pm.requested_power_state_index;
...@@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev) ...@@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.gui_idle) { if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1); tmp |= S_000040_GUI_IDLE(1);
} }
if (rdev->irq.crtc_vblank_int[0]) { if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
} }
if (rdev->irq.crtc_vblank_int[1]) { if (rdev->irq.crtc_vblank_int[1] ||
rdev->irq.pflip[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
} }
if (rdev->irq.hpd[0]) { if (rdev->irq.hpd[0]) {
...@@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev) ...@@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev)
return 0; return 0;
} }
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{ {
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = S_000044_SW_INT(1); uint32_t irq_mask = S_000044_SW_INT(1);
...@@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ ...@@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
} }
if (G_000044_DISPLAY_INT_STAT(irqs)) { if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006534_D1MODE_VBLANK_STATUS, WREG32(R_006534_D1MODE_VBLANK_STATUS,
S_006534_D1MODE_VBLANK_ACK(1)); S_006534_D1MODE_VBLANK_ACK(1));
} }
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006D34_D2MODE_VBLANK_STATUS, WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1)); S_006D34_D2MODE_VBLANK_ACK(1));
} }
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
} }
if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
} }
} else { } else {
*r500_disp_int = 0; rdev->irq.stat_regs.r500.disp_int = 0;
} }
if (irqs) { if (irqs) {
...@@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ ...@@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
void rs600_irq_disable(struct radeon_device *rdev) void rs600_irq_disable(struct radeon_device *rdev)
{ {
u32 tmp;
WREG32(R_000040_GEN_INT_CNTL, 0); WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0); WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */ /* Wait and acknowledge irq */
mdelay(1); mdelay(1);
rs600_irq_ack(rdev, &tmp); rs600_irq_ack(rdev);
} }
int rs600_irq_process(struct radeon_device *rdev) int rs600_irq_process(struct radeon_device *rdev)
{ {
uint32_t status, msi_rearm; u32 status, msi_rearm;
uint32_t r500_disp_int;
bool queue_hotplug = false; bool queue_hotplug = false;
/* reset gui idle ack. the status bit is broken */ /* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false; rdev->irq.gui_idle_acked = false;
status = rs600_irq_ack(rdev, &r500_disp_int); status = rs600_irq_ack(rdev);
if (!status && !r500_disp_int) { if (!status && !rdev->irq.stat_regs.r500.disp_int) {
return IRQ_NONE; return IRQ_NONE;
} }
while (status || r500_disp_int) { while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */ /* SW interrupt */
if (G_000044_SW_INT(status)) if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev); radeon_fence_process(rdev);
}
/* GUI idle */ /* GUI idle */
if (G_000040_GUI_IDLE(status)) { if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true; rdev->irq.gui_idle_acked = true;
...@@ -611,25 +661,33 @@ int rs600_irq_process(struct radeon_device *rdev) ...@@ -611,25 +661,33 @@ int rs600_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.idle_queue); wake_up(&rdev->irq.idle_queue);
} }
/* Vertical blank interrupts */ /* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
drm_handle_vblank(rdev->ddev, 0); if (rdev->irq.crtc_vblank_int[0]) {
rdev->pm.vblank_sync = true; drm_handle_vblank(rdev->ddev, 0);
wake_up(&rdev->irq.vblank_queue); rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (rdev->irq.pflip[0])
radeon_crtc_handle_flip(rdev, 0);
} }
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
drm_handle_vblank(rdev->ddev, 1); if (rdev->irq.crtc_vblank_int[1]) {
rdev->pm.vblank_sync = true; drm_handle_vblank(rdev->ddev, 1);
wake_up(&rdev->irq.vblank_queue); rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (rdev->irq.pflip[1])
radeon_crtc_handle_flip(rdev, 1);
} }
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true; queue_hotplug = true;
DRM_DEBUG("HPD1\n"); DRM_DEBUG("HPD1\n");
} }
if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) { if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true; queue_hotplug = true;
DRM_DEBUG("HPD2\n"); DRM_DEBUG("HPD2\n");
} }
status = rs600_irq_ack(rdev, &r500_disp_int); status = rs600_irq_ack(rdev);
} }
/* reset gui idle ack. the status bit is broken */ /* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false; rdev->irq.gui_idle_acked = false;
......
...@@ -42,6 +42,40 @@ ...@@ -42,6 +42,40 @@
static void rv770_gpu_init(struct radeon_device *rdev); static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev); void rv770_fini(struct radeon_device *rdev);
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* update the scanout addresses */
if (radeon_crtc->crtc_id) {
WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
} else {
WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
}
WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
/* Wait for update_pending to go high. */
while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* Return current update_pending status: */
return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
/* get temperature in millidegrees */ /* get temperature in millidegrees */
u32 rv770_get_temp(struct radeon_device *rdev) u32 rv770_get_temp(struct radeon_device *rdev)
{ {
......
...@@ -351,4 +351,11 @@ ...@@ -351,4 +351,11 @@
#define SRBM_STATUS 0x0E50 #define SRBM_STATUS 0x0E50
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
#endif #endif