Commit 7122e505 authored by Dave Airlie's avatar Dave Airlie

Merge branch 'drm-fixes-4.1' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Mostly stability fixes for UVD and VCE, plus a few other bug and regression
fixes.

* 'drm-fixes-4.1' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: stop trying to suspend UVD sessions
  drm/radeon: more strictly validate the UVD codec
  drm/radeon: make UVD handle checking more strict
  drm/radeon: make VCE handle check more strict
  drm/radeon: fix userptr lockup
  drm/radeon: fix userptr BO unpin bug v3
  drm/radeon: don't setup audio on asics that don't support it
  drm/radeon: disable semaphores for UVD V1 (v2)
parents 5ebe6afa 12e49fea
...@@ -1673,7 +1673,6 @@ struct radeon_uvd { ...@@ -1673,7 +1673,6 @@ struct radeon_uvd {
struct radeon_bo *vcpu_bo; struct radeon_bo *vcpu_bo;
void *cpu_addr; void *cpu_addr;
uint64_t gpu_addr; uint64_t gpu_addr;
void *saved_bo;
atomic_t handles[RADEON_MAX_UVD_HANDLES]; atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
unsigned img_size[RADEON_MAX_UVD_HANDLES]; unsigned img_size[RADEON_MAX_UVD_HANDLES];
......
...@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = { ...@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = {
static struct radeon_asic_ring rv770_uvd_ring = { static struct radeon_asic_ring rv770_uvd_ring = {
.ib_execute = &uvd_v1_0_ib_execute, .ib_execute = &uvd_v1_0_ib_execute,
.emit_fence = &uvd_v2_2_fence_emit, .emit_fence = &uvd_v2_2_fence_emit,
.emit_semaphore = &uvd_v1_0_semaphore_emit, .emit_semaphore = &uvd_v2_2_semaphore_emit,
.cs_parse = &radeon_uvd_cs_parse, .cs_parse = &radeon_uvd_cs_parse,
.ring_test = &uvd_v1_0_ring_test, .ring_test = &uvd_v1_0_ring_test,
.ib_test = &uvd_v1_0_ib_test, .ib_test = &uvd_v1_0_ib_test,
......
...@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); ...@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int uvd_v2_2_resume(struct radeon_device *rdev); int uvd_v2_2_resume(struct radeon_device *rdev);
void uvd_v2_2_fence_emit(struct radeon_device *rdev, void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence); struct radeon_fence *fence);
bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
/* uvd v3.1 */ /* uvd v3.1 */
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
......
...@@ -464,6 +464,10 @@ void radeon_audio_detect(struct drm_connector *connector, ...@@ -464,6 +464,10 @@ void radeon_audio_detect(struct drm_connector *connector,
return; return;
rdev = connector->encoder->dev->dev_private; rdev = connector->encoder->dev->dev_private;
if (!radeon_audio_chipset_supported(rdev))
return;
radeon_encoder = to_radeon_encoder(connector->encoder); radeon_encoder = to_radeon_encoder(connector->encoder);
dig = radeon_encoder->enc_priv; dig = radeon_encoder->enc_priv;
......
...@@ -142,6 +142,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, ...@@ -142,6 +142,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
list_for_each_entry(bo, &node->bos, mn_list) { list_for_each_entry(bo, &node->bos, mn_list) {
if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
continue;
r = radeon_bo_reserve(bo, true); r = radeon_bo_reserve(bo, true);
if (r) { if (r) {
DRM_ERROR("(%ld) failed to reserve user bo\n", r); DRM_ERROR("(%ld) failed to reserve user bo\n", r);
......
...@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) ...@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{ {
struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
struct radeon_ttm_tt *gtt = (void *)ttm; struct radeon_ttm_tt *gtt = (void *)ttm;
struct scatterlist *sg; struct sg_page_iter sg_iter;
int i;
int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ? enum dma_data_direction direction = write ?
...@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) ...@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */ /* free the sg table and pages again */
dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
struct page *page = sg_page(sg); struct page *page = sg_page_iter_page(&sg_iter);
if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
set_page_dirty(page); set_page_dirty(page);
......
...@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev) ...@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev)
int radeon_uvd_suspend(struct radeon_device *rdev) int radeon_uvd_suspend(struct radeon_device *rdev)
{ {
unsigned size; int i, r;
void *ptr;
int i;
if (rdev->uvd.vcpu_bo == NULL) if (rdev->uvd.vcpu_bo == NULL)
return 0; return 0;
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (atomic_read(&rdev->uvd.handles[i])) uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
break; if (handle != 0) {
struct radeon_fence *fence;
if (i == RADEON_MAX_UVD_HANDLES) radeon_uvd_note_usage(rdev);
return 0;
size = radeon_bo_size(rdev->uvd.vcpu_bo); r = radeon_uvd_get_destroy_msg(rdev,
size -= rdev->uvd_fw->size; R600_RING_TYPE_UVD_INDEX, handle, &fence);
if (r) {
DRM_ERROR("Error destroying UVD (%d)!\n", r);
continue;
}
ptr = rdev->uvd.cpu_addr; radeon_fence_wait(fence, false);
ptr += rdev->uvd_fw->size; radeon_fence_unref(&fence);
rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); rdev->uvd.filp[i] = NULL;
memcpy(rdev->uvd.saved_bo, ptr, size); atomic_set(&rdev->uvd.handles[i], 0);
}
}
return 0; return 0;
} }
...@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev) ...@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
ptr = rdev->uvd.cpu_addr; ptr = rdev->uvd.cpu_addr;
ptr += rdev->uvd_fw->size; ptr += rdev->uvd_fw->size;
if (rdev->uvd.saved_bo != NULL) { memset(ptr, 0, size);
memcpy(ptr, rdev->uvd.saved_bo, size);
kfree(rdev->uvd.saved_bo);
rdev->uvd.saved_bo = NULL;
} else
memset(ptr, 0, size);
return 0; return 0;
} }
...@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) ...@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
return 0; return 0;
} }
static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
unsigned stream_type)
{
switch (stream_type) {
case 0: /* H264 */
case 1: /* VC1 */
/* always supported */
return 0;
case 3: /* MPEG2 */
case 4: /* MPEG4 */
/* only since UVD 3 */
if (p->rdev->family >= CHIP_PALM)
return 0;
/* fall through */
default:
DRM_ERROR("UVD codec not supported by hardware %d!\n",
stream_type);
return -EINVAL;
}
}
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
unsigned offset, unsigned buf_sizes[]) unsigned offset, unsigned buf_sizes[])
{ {
...@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, ...@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL; return -EINVAL;
} }
if (msg_type == 1) { switch (msg_type) {
/* it's a decode msg, calc buffer sizes */ case 0:
r = radeon_uvd_cs_msg_decode(msg, buf_sizes); /* it's a create msg, calc image size (width * height) */
/* calc image size (width * height) */ img_size = msg[7] * msg[8];
img_size = msg[6] * msg[7];
r = radeon_uvd_validate_codec(p, msg[4]);
radeon_bo_kunmap(bo); radeon_bo_kunmap(bo);
if (r) if (r)
return r; return r;
} else if (msg_type == 2) { /* try to alloc a new handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle);
return -EINVAL;
}
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p->rdev->uvd.filp[i] = p->filp;
p->rdev->uvd.img_size[i] = img_size;
return 0;
}
}
DRM_ERROR("No more free UVD handles!\n");
return -EINVAL;
case 1:
/* it's a decode msg, validate codec and calc buffer sizes */
r = radeon_uvd_validate_codec(p, msg[4]);
if (!r)
r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
radeon_bo_kunmap(bo);
if (r)
return r;
/* validate the handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
if (p->rdev->uvd.filp[i] != p->filp) {
DRM_ERROR("UVD handle collision detected!\n");
return -EINVAL;
}
return 0;
}
}
DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
return -ENOENT;
case 2:
/* it's a destroy msg, free the handle */ /* it's a destroy msg, free the handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
radeon_bo_kunmap(bo); radeon_bo_kunmap(bo);
return 0; return 0;
} else {
/* it's a create msg, calc image size (width * height) */
img_size = msg[7] * msg[8];
radeon_bo_kunmap(bo);
if (msg_type != 0) { default:
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
return -EINVAL;
}
/* it's a create msg, no special handling needed */
}
/* create or decode, validate the handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
return 0;
}
/* handle not found try to alloc a new one */ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { return -EINVAL;
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p->rdev->uvd.filp[i] = p->filp;
p->rdev->uvd.img_size[i] = img_size;
return 0;
}
} }
DRM_ERROR("No more free UVD handles!\n"); BUG();
return -EINVAL; return -EINVAL;
} }
......
...@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, ...@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
* *
* @p: parser context * @p: parser context
* @handle: handle to validate * @handle: handle to validate
* @allocated: allocated a new handle?
* *
* Validates the handle and return the found session index or -EINVAL * Validates the handle and return the found session index or -EINVAL
* when we don't have another free session index. when we don't have another free session index.
*/ */
int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
uint32_t handle, bool *allocated)
{ {
unsigned i; unsigned i;
*allocated = false;
/* validate the handle */ /* validate the handle */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
if (atomic_read(&p->rdev->vce.handles[i]) == handle) if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
if (p->rdev->vce.filp[i] != p->filp) {
DRM_ERROR("VCE handle collision detected!\n");
return -EINVAL;
}
return i; return i;
}
} }
/* handle not found try to alloc a new one */ /* handle not found try to alloc a new one */
...@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) ...@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
p->rdev->vce.filp[i] = p->filp; p->rdev->vce.filp[i] = p->filp;
p->rdev->vce.img_size[i] = 0; p->rdev->vce.img_size[i] = 0;
*allocated = true;
return i; return i;
} }
} }
...@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) ...@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
int radeon_vce_cs_parse(struct radeon_cs_parser *p) int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{ {
int session_idx = -1; int session_idx = -1;
bool destroyed = false; bool destroyed = false, created = false, allocated = false;
uint32_t tmp, handle = 0; uint32_t tmp, handle = 0;
uint32_t *size = &tmp; uint32_t *size = &tmp;
int i, r; int i, r = 0;
while (p->idx < p->chunk_ib->length_dw) { while (p->idx < p->chunk_ib->length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx); uint32_t len = radeon_get_ib_value(p, p->idx);
...@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
if ((len < 8) || (len & 3)) { if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len); DRM_ERROR("invalid VCE command length (%d)!\n", len);
return -EINVAL; r = -EINVAL;
goto out;
} }
if (destroyed) { if (destroyed) {
DRM_ERROR("No other command allowed after destroy!\n"); DRM_ERROR("No other command allowed after destroy!\n");
return -EINVAL; r = -EINVAL;
goto out;
} }
switch (cmd) { switch (cmd) {
case 0x00000001: // session case 0x00000001: // session
handle = radeon_get_ib_value(p, p->idx + 2); handle = radeon_get_ib_value(p, p->idx + 2);
session_idx = radeon_vce_validate_handle(p, handle); session_idx = radeon_vce_validate_handle(p, handle,
&allocated);
if (session_idx < 0) if (session_idx < 0)
return session_idx; return session_idx;
size = &p->rdev->vce.img_size[session_idx]; size = &p->rdev->vce.img_size[session_idx];
...@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
break; break;
case 0x01000001: // create case 0x01000001: // create
created = true;
if (!allocated) {
DRM_ERROR("Handle already in use!\n");
r = -EINVAL;
goto out;
}
*size = radeon_get_ib_value(p, p->idx + 8) * *size = radeon_get_ib_value(p, p->idx + 8) *
radeon_get_ib_value(p, p->idx + 10) * radeon_get_ib_value(p, p->idx + 10) *
8 * 3 / 2; 8 * 3 / 2;
...@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
*size); *size);
if (r) if (r)
return r; goto out;
r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
*size / 3); *size / 3);
if (r) if (r)
return r; goto out;
break; break;
case 0x02000001: // destroy case 0x02000001: // destroy
...@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
*size * 2); *size * 2);
if (r) if (r)
return r; goto out;
break; break;
case 0x05000004: // video bitstream buffer case 0x05000004: // video bitstream buffer
...@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
tmp); tmp);
if (r) if (r)
return r; goto out;
break; break;
case 0x05000005: // feedback buffer case 0x05000005: // feedback buffer
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
4096); 4096);
if (r) if (r)
return r; goto out;
break; break;
default: default:
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
return -EINVAL; r = -EINVAL;
goto out;
} }
if (session_idx == -1) { if (session_idx == -1) {
DRM_ERROR("no session command at start of IB\n"); DRM_ERROR("no session command at start of IB\n");
return -EINVAL; r = -EINVAL;
goto out;
} }
p->idx += len / 4; p->idx += len / 4;
} }
if (destroyed) { if (allocated && !created) {
/* IB contains a destroy msg, free the handle */ DRM_ERROR("New session without create command!\n");
r = -ENOENT;
}
out:
if ((!r && destroyed) || (r && allocated)) {
/*
* IB contains a destroy msg or we have allocated a
* handle and got an error, anyway free the handle
*/
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
} }
return 0; return r;
} }
/** /**
......
...@@ -989,6 +989,9 @@ ...@@ -989,6 +989,9 @@
((n) & 0x3FFF) << 16) ((n) & 0x3FFF) << 16)
/* UVD */ /* UVD */
#define UVD_SEMA_ADDR_LOW 0xef00
#define UVD_SEMA_ADDR_HIGH 0xef04
#define UVD_SEMA_CMD 0xef08
#define UVD_GPCOM_VCPU_CMD 0xef0c #define UVD_GPCOM_VCPU_CMD 0xef0c
#define UVD_GPCOM_VCPU_DATA0 0xef10 #define UVD_GPCOM_VCPU_DATA0 0xef10
#define UVD_GPCOM_VCPU_DATA1 0xef14 #define UVD_GPCOM_VCPU_DATA1 0xef14
......
...@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, ...@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_semaphore *semaphore, struct radeon_semaphore *semaphore,
bool emit_wait) bool emit_wait)
{ {
uint64_t addr = semaphore->gpu_addr; /* disable semaphores for UVD V1 hardware */
return false;
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
return true;
} }
/** /**
......
...@@ -59,6 +59,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, ...@@ -59,6 +59,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 2); radeon_ring_write(ring, 2);
} }
/**
* uvd_v2_2_semaphore_emit - emit semaphore command
*
* @rdev: radeon_device pointer
* @ring: radeon_ring pointer
* @semaphore: semaphore to emit commands for
* @emit_wait: true if we should emit a wait command
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
return true;
}
/** /**
* uvd_v2_2_resume - memory controller programming * uvd_v2_2_resume - memory controller programming
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment