Commit fa739f4b authored by James Zhu, committed by Alex Deucher

drm/amdgpu: add multiple instances support for Arcturus

Arcturus has dual VCN. Need to add multiple instance support for Arcturus.
Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c01b6a1d
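The change follows one pattern throughout: code that was hard-wired to the first VCN instance, adev->vcn.inst[0], now either loops over adev->vcn.num_vcn_inst or indexes by the instance that owns the ring (ring->me). Below is a minimal illustrative sketch of the loop form only; the struct and field names are simplified stand-ins for this note, not the actual amdgpu definitions.

/* Illustrative sketch only: simplified stand-ins for the amdgpu structures. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_VCN_INSTANCES 2		/* Arcturus carries two VCN blocks */

struct vcn_instance {
	bool ring_dec_ready;		/* stand-in for inst[i].ring_dec.sched.ready */
};

struct vcn {
	unsigned int num_vcn_inst;	/* 1 on single-VCN parts, 2 on Arcturus */
	struct vcn_instance inst[MAX_VCN_INSTANCES];
};

/* Before the patch only inst[0] was checked; after it, every instance counts. */
static unsigned int count_ready_dec_rings(const struct vcn *vcn)
{
	unsigned int i, num_rings = 0;

	for (i = 0; i < vcn->num_vcn_inst; i++) {
		if (vcn->inst[i].ring_dec_ready)
			++num_rings;
	}
	return num_rings;
}

int main(void)
{
	struct vcn vcn = { .num_vcn_inst = 2 };

	vcn.inst[0].ring_dec_ready = true;
	vcn.inst[1].ring_dec_ready = true;
	printf("ready VCN decode rings: %u\n", count_ready_dec_rings(&vcn));
	return 0;
}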
@@ -408,23 +408,29 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.inst[0].ring_dec.sched.ready)
-			++num_rings;
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (adev->vcn.inst[i].ring_dec.sched.ready)
+				++num_rings;
+		}
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
 		break;
 	case AMDGPU_HW_IP_VCN_ENC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		for (i = 0; i < adev->vcn.num_enc_rings; i++)
-			if (adev->vcn.inst[0].ring_enc[i].sched.ready)
-				++num_rings;
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			for (j = 0; j < adev->vcn.num_enc_rings; j++)
+				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+					++num_rings;
+		}
 		ib_start_alignment = 64;
 		ib_size_alignment = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.inst[0].ring_jpeg.sched.ready)
-			++num_rings;
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (adev->vcn.inst[i].ring_jpeg.sched.ready)
+				++num_rings;
+		}
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
 		break;
...
@@ -65,7 +65,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned char fw_check;
-	int r;
+	int i, r;
 
 	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -146,12 +146,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
-	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[0].vcpu_bo,
-				    &adev->vcn.inst[0].gpu_addr, &adev->vcn.inst[0].cpu_addr);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
-		return r;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
+					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
+		if (r) {
+			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+			return r;
+		}
 	}
 
 	if (adev->vcn.indirect_sram) {
@@ -169,26 +172,28 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
-	int i;
-
-	kvfree(adev->vcn.inst[0].saved_bo);
+	int i, j;
 
 	if (adev->vcn.indirect_sram) {
 		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
 				      &adev->vcn.dpg_sram_gpu_addr,
 				      (void **)&adev->vcn.dpg_sram_cpu_addr);
 	}
 
-	amdgpu_bo_free_kernel(&adev->vcn.inst[0].vcpu_bo,
-			      &adev->vcn.inst[0].gpu_addr,
-			      (void **)&adev->vcn.inst[0].cpu_addr);
-
-	amdgpu_ring_fini(&adev->vcn.inst[0].ring_dec);
-
-	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-		amdgpu_ring_fini(&adev->vcn.inst[0].ring_enc[i]);
-
-	amdgpu_ring_fini(&adev->vcn.inst[0].ring_jpeg);
+	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
+		kvfree(adev->vcn.inst[j].saved_bo);
+
+		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
+					  &adev->vcn.inst[j].gpu_addr,
+					  (void **)&adev->vcn.inst[j].cpu_addr);
+
+		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
+
+		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+
+		amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg);
+	}
 
 	release_firmware(adev->vcn.fw);
@@ -199,21 +204,23 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
+	int i;
 
 	cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-	if (adev->vcn.inst[0].vcpu_bo == NULL)
-		return 0;
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.inst[i].vcpu_bo == NULL)
+			return 0;
 
-	size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
-	ptr = adev->vcn.inst[0].cpu_addr;
+		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+		ptr = adev->vcn.inst[i].cpu_addr;
 
-	adev->vcn.inst[0].saved_bo = kvmalloc(size, GFP_KERNEL);
-	if (!adev->vcn.inst[0].saved_bo)
-		return -ENOMEM;
+		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
+		if (!adev->vcn.inst[i].saved_bo)
+			return -ENOMEM;
 
-	memcpy_fromio(adev->vcn.inst[0].saved_bo, ptr, size);
-
+		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+	}
 	return 0;
 }
@@ -221,32 +228,34 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
+	int i;
 
-	if (adev->vcn.inst[0].vcpu_bo == NULL)
-		return -EINVAL;
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.inst[i].vcpu_bo == NULL)
+			return -EINVAL;
 
-	size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
-	ptr = adev->vcn.inst[0].cpu_addr;
+		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+		ptr = adev->vcn.inst[i].cpu_addr;
 
-	if (adev->vcn.inst[0].saved_bo != NULL) {
-		memcpy_toio(ptr, adev->vcn.inst[0].saved_bo, size);
-		kvfree(adev->vcn.inst[0].saved_bo);
-		adev->vcn.inst[0].saved_bo = NULL;
-	} else {
-		const struct common_firmware_header *hdr;
-		unsigned offset;
+		if (adev->vcn.inst[i].saved_bo != NULL) {
+			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+			kvfree(adev->vcn.inst[i].saved_bo);
+			adev->vcn.inst[i].saved_bo = NULL;
+		} else {
+			const struct common_firmware_header *hdr;
+			unsigned offset;
 
-		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-			memcpy_toio(adev->vcn.inst[0].cpu_addr, adev->vcn.fw->data + offset,
-				    le32_to_cpu(hdr->ucode_size_bytes));
-			size -= le32_to_cpu(hdr->ucode_size_bytes);
-			ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+					    le32_to_cpu(hdr->ucode_size_bytes));
+				size -= le32_to_cpu(hdr->ucode_size_bytes);
+				ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			}
+			memset_io(ptr, 0, size);
 		}
-		memset_io(ptr, 0, size);
 	}
-
 	return 0;
 }
@@ -254,31 +263,34 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, vcn.idle_work.work);
-	unsigned int fences = 0;
-	unsigned int i;
+	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
+	unsigned int i, j;
 
-	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
-	}
+	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
+		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+		}
 
-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
-		struct dpg_pause_state new_state;
+		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+			struct dpg_pause_state new_state;
 
-		if (fences)
-			new_state.fw_based = VCN_DPG_STATE__PAUSE;
-		else
-			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+			if (fence[j])
+				new_state.fw_based = VCN_DPG_STATE__PAUSE;
+			else
+				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-		if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
-			new_state.jpeg = VCN_DPG_STATE__PAUSE;
-		else
-			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+			if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg))
+				new_state.jpeg = VCN_DPG_STATE__PAUSE;
+			else
+				new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
 
-		adev->vcn.pause_dpg_mode(adev, &new_state);
-	}
+			adev->vcn.pause_dpg_mode(adev, &new_state);
+		}
 
-	fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg);
-	fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_dec);
+		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg);
+		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
+		fences += fence[j];
+	}
 
 	if (fences == 0) {
 		amdgpu_gfx_off_ctrl(adev, true);
@@ -312,14 +324,14 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 		unsigned int i;
 
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-			fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
+			fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
 		}
 		if (fences)
 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
 		else
 			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-		if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
+		if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg))
 			new_state.jpeg = VCN_DPG_STATE__PAUSE;
 		else
 			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -345,7 +357,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 	unsigned i;
 	int r;
 
-	WREG32(adev->vcn.inst[0].external.scratch9, 0xCAFEDEAD);
+	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 	if (r)
 		return r;
@@ -353,7 +365,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(adev->vcn.inst[0].external.scratch9);
+		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
 		if (tmp == 0xDEADBEEF)
 			break;
 		udelay(1);
@@ -664,7 +676,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
 	unsigned i;
 	int r;
 
-	WREG32(adev->vcn.inst[0].external.jpeg_pitch, 0xCAFEDEAD);
+	WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 	if (r)
 		return r;
@@ -674,7 +686,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
 	amdgpu_ring_commit(ring);
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
+		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
 		if (tmp == 0xDEADBEEF)
 			break;
 		udelay(1);
@@ -748,7 +760,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
+		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
 		if (tmp == 0xDEADBEEF)
 			break;
 		udelay(1);
...
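In the per-ring paths above (the DPG pause handling in amdgpu_vcn_ring_begin_use and the scratch9/jpeg_pitch ring tests), the patch does not loop; it uses the ring's me field to pick the owning instance, so each ring touches the registers of its own VCN block. A rough sketch of that lookup follows, again with hypothetical simplified types rather than the real driver structures.

/* Sketch only: 'me' mirrors the instance index carried by the amdgpu ring. */
#include <stdint.h>

struct ring {
	unsigned int me;		/* index of the VCN instance this ring belongs to */
};

struct instance_regs {
	uint32_t scratch9;		/* per-instance scratch register, as in the ring test */
};

/* Select the register block of the instance that owns the ring, mirroring
 * the adev->vcn.inst[ring->me].external.scratch9 accesses in the patch. */
uint32_t scratch9_for_ring(const struct instance_regs *inst, const struct ring *ring)
{
	return inst[ring->me].scratch9;
}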