Commit d175e9ac authored by Alex Sierra, committed by Alex Deucher

drm/amdgpu: flush TLB functions removal from kfd2kgd interface

[Why]
The kfd2kgd interface is being deprecated. For now, this removal covers
only the TLB invalidation functions, which have already been replaced by
equivalents in the amdgpu_amdkfd API.

[How]
Remove the TLB invalidate functions from the different amdkfd_gfx_v*
versions.
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ffa02269
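For reference, a minimal sketch of the replacement helper in the amdgpu_amdkfd API that this patch relies on. The helper name and body below are reconstructed from the per-ASIC code removed in this diff; the authoritative implementation lands with the parent change, not here.

/* Sketch only -- reconstructed from the removed per-ASIC code below;
 * the real helper is introduced by the parent change, not this diff.
 */
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->family == AMDGPU_FAMILY_AI) {
		int i;

		/* gfx9 parts have multiple VM hubs (GC and MMHUB);
		 * flush the VMID on all of them.
		 */
		for (i = 0; i < adev->num_vmhubs; i++)
			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
	} else {
		/* older parts only expose the GFX hub */
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
	}

	return 0;
}

Centralizing the flush this way means each amdkfd_gfx_v* backend no longer needs its own copy of the loop, which is what the removals below accomplish.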
@@ -320,7 +320,5 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 	.get_tile_config = kgd_gfx_v9_get_tile_config,
 	.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
-	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
-	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
@@ -686,71 +686,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 }
 
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
-{
-	signed long r;
-	uint32_t seq;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
-	spin_lock(&adev->gfx.kiq.ring_lock);
-	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
-	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
-	amdgpu_ring_write(ring,
-			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
-			PACKET3_INVALIDATE_TLBS_PASID(pasid));
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock(&adev->gfx.kiq.ring_lock);
-
-	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
-	if (r < 1) {
-		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
-		return -ETIME;
-	}
-
-	return 0;
-}
-
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid;
-	uint16_t queried_pasid;
-	bool ret;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
-	if (amdgpu_emu_mode == 0 && ring->sched.ready)
-		return invalidate_tlbs_with_kiq(adev, pasid);
-
-	for (vmid = 0; vmid < 16; vmid++) {
-		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
-			continue;
-
-		ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
-				&queried_pasid);
-		if (ret && queried_pasid == pasid) {
-			amdgpu_gmc_flush_gpu_tlb(adev, vmid,
-					AMDGPU_GFXHUB_0, 0);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("non kfd vmid %d\n", vmid);
-		return 0;
-	}
-
-	amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
-	return 0;
-}
-
 static int kgd_address_watch_disable(struct kgd_dev *kgd)
 {
 	return 0;
@@ -832,7 +767,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
 			get_atc_vmid_pasid_mapping_info,
 	.get_tile_config = amdgpu_amdkfd_get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
@@ -696,45 +696,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 			lower_32_bits(page_table_base));
 }
 
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid;
-	unsigned int tmp;
-
-	if (adev->in_gpu_reset)
-		return -EIO;
-
-	for (vmid = 0; vmid < 16; vmid++) {
-		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
-			continue;
-
-		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
-		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
-			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
-			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-			RREG32(mmVM_INVALIDATE_RESPONSE);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("non kfd vmid\n");
-		return 0;
-	}
-
-	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-	RREG32(mmVM_INVALIDATE_RESPONSE);
-	return 0;
-}
-
 /**
  * read_vmid_from_vmfault_reg - read vmid from register
  *
@@ -771,7 +732,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
 };
@@ -657,45 +657,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 			lower_32_bits(page_table_base));
 }
 
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid;
-	unsigned int tmp;
-
-	if (adev->in_gpu_reset)
-		return -EIO;
-
-	for (vmid = 0; vmid < 16; vmid++) {
-		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
-			continue;
-
-		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
-		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
-			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
-			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-			RREG32(mmVM_INVALIDATE_RESPONSE);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("non kfd vmid %d\n", vmid);
-		return -EINVAL;
-	}
-
-	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-	RREG32(mmVM_INVALIDATE_RESPONSE);
-	return 0;
-}
-
 const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -717,6 +678,4 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
 };
@@ -617,100 +617,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 }
 
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
-			uint32_t flush_type)
-{
-	signed long r;
-	uint32_t seq;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
-	spin_lock(&adev->gfx.kiq.ring_lock);
-	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
-	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
-	amdgpu_ring_write(ring,
-			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
-			PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
-			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
-			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock(&adev->gfx.kiq.ring_lock);
-
-	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
-	if (r < 1) {
-		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
-		return -ETIME;
-	}
-
-	return 0;
-}
-
-int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid, i;
-	uint16_t queried_pasid;
-	bool ret;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-	uint32_t flush_type = 0;
-
-	if (adev->in_gpu_reset)
-		return -EIO;
-	if (adev->gmc.xgmi.num_physical_nodes &&
-		adev->asic_type == CHIP_VEGA20)
-		flush_type = 2;
-
-	if (ring->sched.ready)
-		return invalidate_tlbs_with_kiq(adev, pasid, flush_type);
-
-	for (vmid = 0; vmid < 16; vmid++) {
-		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
-			continue;
-
-		ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
-				&queried_pasid);
-		if (ret && queried_pasid == pasid) {
-			for (i = 0; i < adev->num_vmhubs; i++)
-				amdgpu_gmc_flush_gpu_tlb(adev, vmid,
-							i, flush_type);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int i;
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("non kfd vmid %d\n", vmid);
-		return 0;
-	}
-
-	/* Use legacy mode tlb invalidation.
-	 *
-	 * Currently on Raven the code below is broken for anything but
-	 * legacy mode due to a MMHUB power gating problem. A workaround
-	 * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
-	 * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
-	 * bit.
-	 *
-	 * TODO 1: agree on the right set of invalidation registers for
-	 * KFD use. Use the last one for now. Invalidate both GC and
-	 * MMHUB.
-	 *
-	 * TODO 2: support range-based invalidation, requires kfg2kgd
-	 * interface change
-	 */
-	for (i = 0; i < adev->num_vmhubs; i++)
-		amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
-
-	return 0;
-}
-
 int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
 {
 	return 0;
@@ -793,7 +699,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 	.get_tile_config = kgd_gfx_v9_get_tile_config,
 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
-	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
-	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
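The removed gfx_v9 code above encodes two constraints that any centralized replacement has to preserve: XGMI-connected Vega20 needs a heavy-weight flush (flush_type 2) so that TLBs on remote hive members are invalidated as well, and Raven must stay on legacy-mode invalidation (flush_type 0) because of the MMHUB power-gating problem described in the comment. A sketch of that selection logic as a hypothetical helper; the function name is illustrative, not from the kernel:

/* Hypothetical helper, for illustration only: mirrors the flush_type
 * selection in the removed kgd_gfx_v9_invalidate_tlbs() above.
 */
static uint32_t kfd_pick_tlb_flush_type(struct amdgpu_device *adev)
{
	/* Heavy-weight flush for XGMI-connected Vega20 so that remote
	 * hive members see the invalidation too.
	 */
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->asic_type == CHIP_VEGA20)
		return 2;

	/* Everything else, including Raven, uses legacy mode (type 0). */
	return 0;
}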
@@ -57,7 +57,5 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 		uint8_t vmid, uint16_t *p_pasid);
-int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
 int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
 		struct tile_config *config);
@@ -307,8 +307,6 @@ struct kfd2kgd_calls {
 	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
 			uint32_t vmid, uint64_t page_table_base);
-	int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
-	int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
 	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
 	uint64_t (*get_hive_id)(struct kgd_dev *kgd);
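From the KFD side, dropping these two members means call sites stop going through the kfd2kgd function table. A sketch of what a caller plausibly looks like after the companion KFD update; the kfd_flush_tlb() shape and field names here are illustrative, since the actual call-site change is not part of this diff:

void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	/* Before: indirect call through the kfd2kgd table, e.g.
	 *   dev->kfd2kgd->invalidate_tlbs(dev->kgd, pdd->process->pasid);
	 * After: direct call into the amdgpu_amdkfd API.
	 */
	amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd, pdd->process->pasid);
}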