Commit d698291c authored by Dave Airlie's avatar Dave Airlie

Merge branch 'drm-fixes-4.2' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Radeon and amdgpu fixes for 4.2.  The audio fix ended up being more
invasive than I would have liked, but this should finally fix up the
last of the regressions since DP audio support was added.

* 'drm-fixes-4.2' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: add new parameter to separate map and unmap
  drm/amdgpu: hdp_flush is not needed for inside IB
  drm/amdgpu: different emit_ib for gfx and compute
  drm/amdgpu: information leak in amdgpu_info_ioctl()
  drm/amdgpu: clean up init sequence for failures
  drm/radeon/combios: add some validation of lvds values
  drm/radeon: rework audio modeset to handle non-audio hdmi features
  drm/radeon: rework audio detect (v4)
  drm/amdgpu: Drop drm/ prefix for including drm.h in amdgpu_drm.h
  drm/radeon: Drop drm/ prefix for including drm.h in radeon_drm.h
parents 520e8bfc 194a3364
...@@ -1866,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); ...@@ -1866,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
/*
 * Per-IP-block initialization status. Replaces the former single
 * ip_block_enabled flag so that teardown/suspend paths can tell how far
 * each block progressed through init and only undo the phases that
 * actually completed (see amdgpu_init()/amdgpu_fini() in this diff).
 */
struct amdgpu_ip_block_status {
bool valid; /* block is enabled and early_init succeeded (or was absent) */
bool sw; /* sw_init completed for this block */
bool hw; /* hw_init completed for this block */
};
struct amdgpu_device { struct amdgpu_device {
struct device *dev; struct device *dev;
struct drm_device *ddev; struct drm_device *ddev;
...@@ -2008,7 +2014,7 @@ struct amdgpu_device { ...@@ -2008,7 +2014,7 @@ struct amdgpu_device {
const struct amdgpu_ip_block_version *ip_blocks; const struct amdgpu_ip_block_version *ip_blocks;
int num_ip_blocks; int num_ip_blocks;
bool *ip_block_enabled; struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock; struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7); DECLARE_HASHTABLE(mn_hash, 7);
......
...@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev) ...@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL; return -EINVAL;
} }
adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); adev->ip_block_status = kcalloc(adev->num_ip_blocks,
if (adev->ip_block_enabled == NULL) sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
if (adev->ip_block_status == NULL)
return -ENOMEM; return -ENOMEM;
if (adev->ip_blocks == NULL) { if (adev->ip_blocks == NULL) {
...@@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev) ...@@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) { for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i); DRM_ERROR("disabled ip block: %d\n", i);
adev->ip_block_enabled[i] = false; adev->ip_block_status[i].valid = false;
} else { } else {
if (adev->ip_blocks[i].funcs->early_init) { if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev); r = adev->ip_blocks[i].funcs->early_init((void *)adev);
if (r == -ENOENT) if (r == -ENOENT)
adev->ip_block_enabled[i] = false; adev->ip_block_status[i].valid = false;
else if (r) else if (r)
return r; return r;
else else
adev->ip_block_enabled[i] = true; adev->ip_block_status[i].valid = true;
} else { } else {
adev->ip_block_enabled[i] = true; adev->ip_block_status[i].valid = true;
} }
} }
} }
...@@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev) ...@@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r; int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) { for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i]) if (!adev->ip_block_status[i].valid)
continue; continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev); r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r) if (r)
return r; return r;
adev->ip_block_status[i].sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */ /* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev); r = amdgpu_vram_scratch_init(adev);
...@@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev) ...@@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = amdgpu_wb_init(adev); r = amdgpu_wb_init(adev);
if (r) if (r)
return r; return r;
adev->ip_block_status[i].hw = true;
} }
} }
for (i = 0; i < adev->num_ip_blocks; i++) { for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i]) if (!adev->ip_block_status[i].sw)
continue; continue;
/* gmc hw init is done early */ /* gmc hw init is done early */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
...@@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev) ...@@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->hw_init((void *)adev); r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r) if (r)
return r; return r;
adev->ip_block_status[i].hw = true;
} }
return 0; return 0;
...@@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev) ...@@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r; int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) { for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i]) if (!adev->ip_block_status[i].valid)
continue; continue;
/* enable clockgating to save power */ /* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
...@@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev) ...@@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
int i, r; int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) { for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i]) if (!adev->ip_block_status[i].hw)
continue; continue;
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev); amdgpu_wb_fini(adev);
...@@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev) ...@@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return r; return r;
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */ /* XXX handle errors */
adev->ip_block_status[i].hw = false;
} }
for (i = adev->num_ip_blocks - 1; i >= 0; i--) { for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i]) if (!adev->ip_block_status[i].sw)
continue; continue;
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */ /* XXX handle errors */
adev->ip_block_enabled[i] = false; adev->ip_block_status[i].sw = false;
adev->ip_block_status[i].valid = false;
} }
return 0; return 0;
...@@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev) ...@@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
int i, r; int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) { for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i]) if (!adev->ip_block_status[i].valid)
continue; continue;
/* ungate blocks so that suspend can properly shut them down */ /* ungate blocks so that suspend can properly shut them down */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
...@@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev) ...@@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r; int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) { for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i]) if (!adev->ip_block_status[i].valid)
continue; continue;
r = adev->ip_blocks[i].funcs->resume(adev); r = adev->ip_blocks[i].funcs->resume(adev);
if (r) if (r)
...@@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) ...@@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev); amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev); amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev); r = amdgpu_fini(adev);
kfree(adev->ip_block_enabled); kfree(adev->ip_block_status);
adev->ip_block_enabled = NULL; adev->ip_block_status = NULL;
adev->accel_working = false; adev->accel_working = false;
/* free i2c buses */ /* free i2c buses */
amdgpu_i2c_fini(adev); amdgpu_i2c_fini(adev);
......
...@@ -449,7 +449,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, ...@@ -449,7 +449,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
* vital here, so they are not reported back to userspace. * vital here, so they are not reported back to userspace.
*/ */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va) struct amdgpu_bo_va *bo_va, uint32_t operation)
{ {
struct ttm_validate_buffer tv, *entry; struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos; struct amdgpu_bo_list_entry *vm_bos;
...@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, ...@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r) if (r)
goto error_unlock; goto error_unlock;
r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
if (operation == AMDGPU_VA_OP_MAP)
r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
error_unlock: error_unlock:
mutex_unlock(&bo_va->vm->mutex); mutex_unlock(&bo_va->vm->mutex);
...@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ...@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
} }
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
amdgpu_gem_va_update_vm(adev, bo_va); amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
drm_gem_object_unreference_unlocked(gobj); drm_gem_object_unreference_unlocked(gobj);
return r; return r;
......
...@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, ...@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
if (vm) { if (vm) {
/* do context switch */ /* do context switch */
amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
}
if (vm && ring->funcs->emit_gds_switch) if (ring->funcs->emit_gds_switch)
amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
ib->gds_base, ib->gds_size, ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size, ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size); ib->oa_base, ib->oa_size);
if (ring->funcs->emit_hdp_flush) if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring); amdgpu_ring_emit_hdp_flush(ring);
}
old_ctx = ring->current_ctx; old_ctx = ring->current_ctx;
for (i = 0; i < num_ibs; ++i) { for (i = 0; i < num_ibs; ++i) {
......
...@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++) { for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].type == type && if (adev->ip_blocks[i].type == type &&
adev->ip_block_enabled[i]) { adev->ip_block_status[i].valid) {
ip.hw_ip_version_major = adev->ip_blocks[i].major; ip.hw_ip_version_major = adev->ip_blocks[i].major;
ip.hw_ip_version_minor = adev->ip_blocks[i].minor; ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
ip.capabilities_flags = 0; ip.capabilities_flags = 0;
...@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++) for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].type == type && if (adev->ip_blocks[i].type == type &&
adev->ip_block_enabled[i] && adev->ip_block_status[i].valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++; count++;
...@@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0; return n ? -EFAULT : 0;
} }
case AMDGPU_INFO_DEV_INFO: { case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info; struct drm_amdgpu_info_device dev_info = {};
struct amdgpu_cu_info cu_info; struct amdgpu_cu_info cu_info;
dev_info.device_id = dev->pdev->device; dev_info.device_id = dev->pdev->device;
......
...@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring, ...@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
* sheduling on the ring. This function schedules the IB * sheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU. * on the gfx ring for execution by the GPU.
*/ */
static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib)
{ {
bool need_ctx_switch = ring->current_ctx != ib->ctx; bool need_ctx_switch = ring->current_ctx != ib->ctx;
...@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, ...@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */ /* drop the CE preamble IB for the same context */
if ((ring->type == AMDGPU_RING_TYPE_GFX) && if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
(ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
!need_ctx_switch)
return; return;
if (ring->type == AMDGPU_RING_TYPE_COMPUTE) if (need_ctx_switch)
control |= INDIRECT_BUFFER_VALID;
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
next_rptr += 2; next_rptr += 2;
next_rptr += 4; next_rptr += 4;
...@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, ...@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr); amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */ /* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0);
} }
...@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, ...@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control); amdgpu_ring_write(ring, control);
} }
/**
 * gfx_v7_0_ring_emit_ib_compute - emit an indirect buffer on a compute ring
 * @ring: the amdgpu compute ring to write packets to
 * @ib: the indirect buffer to schedule for execution
 *
 * Compute-only variant split out from the old shared gfx_v7_0_ring_emit_ib():
 * compute rings unconditionally set INDIRECT_BUFFER_VALID and never need the
 * gfx-only CE-preamble drop or SWITCH_BUFFER context-switch handling.
 * Emits a WRITE_DATA packet updating next_rptr, then the INDIRECT_BUFFER
 * packet pointing at the IB itself.
 */
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
u32 header, control = 0;
/* next_rptr points past the 5-dword WRITE_DATA packet emitted below */
u32 next_rptr = ring->wptr + 5;
/* compute rings always mark the IB valid (gfx rings set this conditionally) */
control |= INDIRECT_BUFFER_VALID;
/* account for the 4-dword INDIRECT_BUFFER packet that follows */
next_rptr += 4;
/* write the updated next_rptr to its GPU address so progress can be tracked
 * — NOTE(review): exact consumer of next_rptr_gpu_addr not visible here */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, next_rptr);
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
/* IB size in dwords, plus the VM id (bits 24+) when executing under a VM */
control |= ib->length_dw |
(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
/* request byte swapping of the fetched IB on big-endian hosts */
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
/** /**
* gfx_v7_0_ring_test_ib - basic ring IB test * gfx_v7_0_ring_test_ib - basic ring IB test
* *
...@@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { ...@@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.get_wptr = gfx_v7_0_ring_get_wptr_gfx, .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx, .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL, .parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib, .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx, .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
...@@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { ...@@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.get_wptr = gfx_v7_0_ring_get_wptr_compute, .get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute, .set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL, .parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib, .emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute, .emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
......
...@@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) ...@@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */ amdgpu_ring_write(ring, 0x20); /* poll interval */
} }
static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib)
{ {
bool need_ctx_switch = ring->current_ctx != ib->ctx; bool need_ctx_switch = ring->current_ctx != ib->ctx;
...@@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, ...@@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */ /* drop the CE preamble IB for the same context */
if ((ring->type == AMDGPU_RING_TYPE_GFX) && if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
(ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
!need_ctx_switch)
return; return;
if (ring->type == AMDGPU_RING_TYPE_COMPUTE) if (need_ctx_switch)
control |= INDIRECT_BUFFER_VALID;
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
next_rptr += 2; next_rptr += 2;
next_rptr += 4; next_rptr += 4;
...@@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, ...@@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr); amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */ /* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0);
} }
...@@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, ...@@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control); amdgpu_ring_write(ring, control);
} }
/**
 * gfx_v8_0_ring_emit_ib_compute - emit an indirect buffer on a compute ring
 * @ring: the amdgpu compute ring to write packets to
 * @ib: the indirect buffer to schedule for execution
 *
 * Compute-only variant split out from the old shared gfx_v8_0_ring_emit_ib():
 * compute rings unconditionally set INDIRECT_BUFFER_VALID and never need the
 * gfx-only CE-preamble drop or SWITCH_BUFFER context-switch handling.
 * Mirrors gfx_v7_0_ring_emit_ib_compute() for the VI/gfx8 generation.
 */
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
u32 header, control = 0;
/* next_rptr points past the 5-dword WRITE_DATA packet emitted below */
u32 next_rptr = ring->wptr + 5;
/* compute rings always mark the IB valid (gfx rings set this conditionally) */
control |= INDIRECT_BUFFER_VALID;
/* account for the 4-dword INDIRECT_BUFFER packet that follows */
next_rptr += 4;
/* write the updated next_rptr to its GPU address so progress can be tracked
 * — NOTE(review): exact consumer of next_rptr_gpu_addr not visible here */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, next_rptr);
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
/* IB size in dwords, plus the VM id (bits 24+) when executing under a VM */
control |= ib->length_dw |
(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
/* request byte swapping of the fetched IB on big-endian hosts */
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags) u64 seq, unsigned flags)
{ {
...@@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { ...@@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.get_wptr = gfx_v8_0_ring_get_wptr_gfx, .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx, .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL, .parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib, .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx, .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
...@@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { ...@@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.get_wptr = gfx_v8_0_ring_get_wptr_compute, .get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute, .set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL, .parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib, .emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
......
...@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, ...@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
encoder_mode = atombios_get_encoder_mode(encoder); encoder_mode = atombios_get_encoder_mode(encoder);
if (connector && (radeon_audio != 0) && if (connector && (radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) || ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
(ENCODER_MODE_IS_DP(encoder_mode) && ENCODER_MODE_IS_DP(encoder_mode)))
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
radeon_audio_mode_set(encoder, adjusted_mode); radeon_audio_mode_set(encoder, adjusted_mode);
} }
......
...@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder) ...@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset;
if (!dig || !dig->afmt || !dig->afmt->pin) if (!dig || !dig->afmt || !dig->pin)
return; return;
offset = dig->afmt->offset; WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
AFMT_AUDIO_SRC_SELECT(dig->pin->id));
WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
} }
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct drm_connector *connector, struct drm_display_mode *mode) struct drm_connector *connector,
struct drm_display_mode *mode)
{ {
struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 tmp = 0, offset; u32 tmp = 0;
if (!dig || !dig->afmt || !dig->afmt->pin) if (!dig || !dig->afmt || !dig->pin)
return; return;
offset = dig->afmt->pin->offset;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1]) if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
...@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, ...@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
else else
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
} }
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
} }
void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
u8 *sadb, int sad_count) u8 *sadb, int sad_count)
{ {
struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset, tmp; u32 tmp;
if (!dig || !dig->afmt || !dig->afmt->pin) if (!dig || !dig->afmt || !dig->pin)
return; return;
offset = dig->afmt->pin->offset;
/* program the speaker allocation */ /* program the speaker allocation */
tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); tmp = RREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */ /* set HDMI mode */
tmp |= HDMI_CONNECTION; tmp |= HDMI_CONNECTION;
...@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, ...@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]); tmp |= SPEAKER_ALLOCATION(sadb[0]);
else else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
} }
void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
u8 *sadb, int sad_count) u8 *sadb, int sad_count)
{ {
struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset, tmp; u32 tmp;
if (!dig || !dig->afmt || !dig->afmt->pin) if (!dig || !dig->afmt || !dig->pin)
return; return;
offset = dig->afmt->pin->offset;
/* program the speaker allocation */ /* program the speaker allocation */
tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); tmp = RREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set DP mode */ /* set DP mode */
tmp |= DP_CONNECTION; tmp |= DP_CONNECTION;
...@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, ...@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]); tmp |= SPEAKER_ALLOCATION(sadb[0]);
else else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
} }
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
struct cea_sad *sads, int sad_count) struct cea_sad *sads, int sad_count)
{ {
u32 offset;
int i; int i;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
...@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, ...@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
}; };
if (!dig || !dig->afmt || !dig->afmt->pin) if (!dig || !dig->afmt || !dig->pin)
return; return;
offset = dig->afmt->pin->offset;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0; u32 value = 0;
u8 stereo_freqs = 0; u8 stereo_freqs = 0;
...@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, ...@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
} }
} }
...@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev, ...@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
} }
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock) struct radeon_crtc *crtc, unsigned int clock)
{ {
/* Two dtos; generally use dto0 for HDMI */ /* Two dtos; generally use dto0 for HDMI */
u32 value = 0; u32 value = 0;
...@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, ...@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
} }
void dce6_dp_audio_set_dto(struct radeon_device *rdev, void dce6_dp_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock) struct radeon_crtc *crtc, unsigned int clock)
{ {
/* Two dtos; generally use dto1 for DP */ /* Two dtos; generally use dto1 for DP */
u32 value = 0; u32 value = 0;
......
This diff is collapsed.
...@@ -68,7 +68,8 @@ struct radeon_audio_funcs ...@@ -68,7 +68,8 @@ struct radeon_audio_funcs
int radeon_audio_init(struct radeon_device *rdev); int radeon_audio_init(struct radeon_device *rdev);
void radeon_audio_detect(struct drm_connector *connector, void radeon_audio_detect(struct drm_connector *connector,
enum drm_connector_status status); struct drm_encoder *encoder,
enum drm_connector_status status);
u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
u32 offset, u32 reg); u32 offset, u32 reg);
void radeon_audio_endpoint_wreg(struct radeon_device *rdev, void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
......
...@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder ...@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
if (hss > lvds->native_mode.hdisplay)
hss = (10 - 1) * 8;
lvds->native_mode.htotal = lvds->native_mode.hdisplay + lvds->native_mode.htotal = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; hss;
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
(RBIOS8(tmp + 23) * 8); (RBIOS8(tmp + 23) * 8);
......
...@@ -1379,8 +1379,16 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) ...@@ -1379,8 +1379,16 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* updated in get modes as well since we need to know if it's analog or digital */ /* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret); radeon_connector_update_scratch_regs(connector, ret);
if (radeon_audio != 0) if ((radeon_audio != 0) && radeon_connector->use_digital) {
radeon_audio_detect(connector, ret); const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
radeon_connector_get_edid(connector);
radeon_audio_detect(connector, encoder, ret);
}
}
exit: exit:
pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_mark_last_busy(connector->dev->dev);
...@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) ...@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret); radeon_connector_update_scratch_regs(connector, ret);
if (radeon_audio != 0) if ((radeon_audio != 0) && encoder) {
radeon_audio_detect(connector, ret); radeon_connector_get_edid(connector);
radeon_audio_detect(connector, encoder, ret);
}
out: out:
pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_mark_last_busy(connector->dev->dev);
......
...@@ -237,7 +237,6 @@ struct radeon_afmt { ...@@ -237,7 +237,6 @@ struct radeon_afmt {
int offset; int offset;
bool last_buffer_filled_status; bool last_buffer_filled_status;
int id; int id;
struct r600_audio_pin *pin;
}; };
struct radeon_mode_info { struct radeon_mode_info {
...@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig { ...@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
uint8_t backlight_level; uint8_t backlight_level;
int panel_mode; int panel_mode;
struct radeon_afmt *afmt; struct radeon_afmt *afmt;
struct r600_audio_pin *pin;
int active_mst_links; int active_mst_links;
}; };
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
#ifndef __AMDGPU_DRM_H__ #ifndef __AMDGPU_DRM_H__
#define __AMDGPU_DRM_H__ #define __AMDGPU_DRM_H__
#include <drm/drm.h> #include "drm.h"
#define DRM_AMDGPU_GEM_CREATE 0x00 #define DRM_AMDGPU_GEM_CREATE 0x00
#define DRM_AMDGPU_GEM_MMAP 0x01 #define DRM_AMDGPU_GEM_MMAP 0x01
......
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
#ifndef __RADEON_DRM_H__ #ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__ #define __RADEON_DRM_H__
#include <drm/drm.h> #include "drm.h"
/* WARNING: If you change any of these defines, make sure to change the /* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h) * defines in the X server file (radeon_sarea.h)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment