Commit 0f30a397 authored by Christian König, committed by Alex Deucher

drm/amdgpu: implement UVD VM mode for Stoney v2

Starting with Stoney we support running UVD in VM mode as well.

v2: rebased, only enable on Polaris for now.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f8a2fdba
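
Background for the diff below: in physical mode the UVD ring keeps a CS parser (.parse_cs) that patches each command buffer to pinned addresses before execution; in VM mode .parse_cs is NULL and indirect buffers execute inside the submitting process's GPU virtual address space, so the ring must tag each IB with a VMID and be able to flush the VM itself. All register programming in this patch uses the same type-0 packet idiom, a PACKET0 header naming the register followed by the value. A minimal sketch, where the helper name is hypothetical while PACKET0() and amdgpu_ring_write() are the real driver interfaces:

	/* Illustrative helper, not part of the patch: program one UVD
	 * register through the ring. */
	static void uvd_v6_0_ring_wreg(struct amdgpu_ring *ring,
				       uint32_t reg, uint32_t val)
	{
		/* PACKET0(reg, 0): write the following 1 dword to reg */
		amdgpu_ring_write(ring, PACKET0(reg, 0));
		amdgpu_ring_write(ring, val);
	}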
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -34,6 +34,7 @@
 #include "smu/smu_7_1_3_d.h"
 #include "smu/smu_7_1_3_sh_mask.h"
 #include "bif/bif_5_1_d.h"
+#include "gmc/gmc_8_1_d.h"
 #include "vi.h"
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -672,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib,
 				  unsigned vm_id, bool ctx_switch)
 {
+	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
+	amdgpu_ring_write(ring, vm_id);
+
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
@@ -715,6 +719,57 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
 	return r;
 }
 
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+					unsigned vm_id, uint64_t pd_addr)
+{
+	uint32_t reg;
+
+	if (vm_id < 8)
+		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+	else
+		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, pd_addr >> 12);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 0x8);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, 1 << vm_id);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 0x8);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 0xC);
+}
+
+static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+	amdgpu_ring_write(ring, 0xffffffff); /* mask */
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 0xE);
+}
+
 static bool uvd_v6_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -951,7 +1006,7 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 	.set_powergating_state = uvd_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
@@ -966,9 +1021,32 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+	.get_rptr = uvd_v6_0_ring_get_rptr,
+	.get_wptr = uvd_v6_0_ring_get_wptr,
+	.set_wptr = uvd_v6_0_ring_set_wptr,
+	.parse_cs = NULL,
+	.emit_ib = uvd_v6_0_ring_emit_ib,
+	.emit_fence = uvd_v6_0_ring_emit_fence,
+	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
+	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
+	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
+	.test_ring = uvd_v6_0_ring_test_ring,
+	.test_ib = uvd_v6_0_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+};
+
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
+	if (adev->asic_type >= CHIP_STONEY) {
+		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
+		DRM_INFO("UVD is enabled in VM mode\n");
+	} else {
+		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
+		DRM_INFO("UVD is enabled in physical mode\n");
+	}
 }
 
 static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
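
The two new callbacks are not invoked anywhere in this file; once plugged into uvd_v6_0_ring_vm_funcs they are reached from the generic submission path, which flushes the VM before running a job's IBs whenever a VMID is newly assigned or its page directory has moved. A condensed sketch of that flow, assuming the amdgpu_vm_flush() behavior of this kernel generation (the hook names are real, the surrounding logic is simplified for illustration):

	/* Illustration only, not the driver code. */
	static void example_vm_flush_path(struct amdgpu_ring *ring,
					  unsigned vm_id, uint64_t pd_addr)
	{
		/* Order the page-table switch behind all earlier work on
		 * this ring that may still use the old translation. */
		ring->funcs->emit_pipeline_sync(ring);

		/* Re-point the VMID's page directory and invalidate the
		 * TLB before the new IBs execute. */
		ring->funcs->emit_vm_flush(ring, vm_id, pd_addr);
	}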
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -111,6 +111,8 @@
 #define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
 #define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
 #define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
+#define mmUVD_GP_SCRATCH8 0x3c0a
+#define mmUVD_GP_SCRATCH9 0x3c0b
 #define mmUVD_GP_SCRATCH4 0x3d38
 
 #endif /* UVD_6_0_D_H */
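
The patch never names the GPCOM command encodings it uses. Read against the sequences in uvd_v6_0_ring_emit_vm_flush() and uvd_v6_0_ring_emit_pipeline_sync(), 0x8 behaves as "write DATA1 to the register whose byte offset is in DATA0" (hence reg << 2), 0xC as "wait until that register, masked by GP_SCRATCH8, equals DATA1", and 0xE as "wait until the memory dword at DATA1:DATA0, masked by GP_SCRATCH8, equals GP_SCRATCH9". This reading is inferred from the code, not documented by the commit; likewise, pd_addr >> 12 matches the GMC v8 convention of programming page-directory bases in 4 KiB units. A hypothetical wrapper for the recurring triplet:

	/* Hypothetical helper, not part of the patch: each GPCOM
	 * operation is three register writes, two operands and a
	 * command code that triggers execution. */
	static void uvd_v6_0_vcpu_cmd(struct amdgpu_ring *ring,
				      uint32_t data0, uint32_t data1,
				      uint32_t cmd)
	{
		amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
		amdgpu_ring_write(ring, data0);
		amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
		amdgpu_ring_write(ring, data1);
		amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
		amdgpu_ring_write(ring, cmd);
	}

With it, the first step of the VM flush collapses to uvd_v6_0_vcpu_cmd(ring, reg << 2, pd_addr >> 12, 0x8).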