Commit 5504eb16 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.2-2022-12-15' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-6.2-2022-12-15:

amdgpu:
- Spelling fix
- BO pin fix
- Properly handle polaris 10/11 overlap asics
- GMC9 fix
- SR-IOV suspend fix
- DCN 3.1.4 fix
- KFD userptr locking fix
- SMU13.x fixes
- GDS/GWS/OA handling fix
- Reserved VMID handling fixes
- FRU EEPROM fix
- BO validation fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221215224936.6438-1-alexander.deucher@amd.com
parents 66efff51 7a18e089
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
+#include <linux/mmu_notifier.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include "amdgpu_sync.h"
@@ -65,6 +66,7 @@ struct kgd_mem {
     struct mutex lock;
     struct amdgpu_bo *bo;
     struct dma_buf *dmabuf;
+    struct hmm_range *range;
     struct list_head attachments;
     /* protected by amdkfd_process_info.lock */
     struct ttm_validate_buffer validate_list;
@@ -75,7 +77,7 @@ struct kgd_mem {
     uint32_t alloc_flags;
-    atomic_t invalid;
+    uint32_t invalid;
     struct amdkfd_process_info *process_info;
     struct amdgpu_sync sync;
@@ -131,7 +133,8 @@ struct amdkfd_process_info {
     struct amdgpu_amdkfd_fence *eviction_fence;
     /* MMU-notifier related fields */
-    atomic_t evicted_bos;
+    struct mutex notifier_lock;
+    uint32_t evicted_bos;
     struct delayed_work restore_userptr_work;
     struct pid *pid;
     bool block_mmu_notifications;
@@ -180,7 +183,8 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
 struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                                unsigned long cur_seq, struct kgd_mem *mem);
 #else
 static inline
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -201,7 +205,8 @@ int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
 }
 static inline
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                                unsigned long cur_seq, struct kgd_mem *mem)
 {
     return 0;
 }
......
@@ -4112,6 +4112,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
     adev->in_suspend = true;
+    /* Evict the majority of BOs before grabbing the full access */
+    r = amdgpu_device_evict_resources(adev);
+    if (r)
+        return r;
+
     if (amdgpu_sriov_vf(adev)) {
         amdgpu_virt_fini_data_exchange(adev);
         r = amdgpu_virt_request_full_gpu(adev, false);
......
@@ -2039,6 +2039,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
              "See modparam exp_hw_support\n");
         return -ENODEV;
     }
+    /* differentiate between P10 and P11 asics with the same DID */
+    if (pdev->device == 0x67FF &&
+        (pdev->revision == 0xE3 ||
+         pdev->revision == 0xE7 ||
+         pdev->revision == 0xF3 ||
+         pdev->revision == 0xF7)) {
+        flags &= ~AMD_ASIC_MASK;
+        flags |= CHIP_POLARIS10;
+    }
+
     /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
      * however, SME requires an indirect IOMMU mapping because the encryption
@@ -2108,12 +2117,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
     pci_set_drvdata(pdev, ddev);
-    ret = amdgpu_driver_load_kms(adev, ent->driver_data);
+    ret = amdgpu_driver_load_kms(adev, flags);
     if (ret)
         goto err_pci;
 retry_init:
-    ret = drm_dev_register(ddev, ent->driver_data);
+    ret = drm_dev_register(ddev, flags);
     if (ret == -EAGAIN && ++retry <= 3) {
         DRM_INFO("retry init %d\n", retry);
         /* Don't request EX mode too frequently which is attacking */
......
@@ -64,6 +64,7 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
                 sizeof(atom_ctx->vbios_version)) ||
             strnstr(atom_ctx->vbios_version, "D163",
                 sizeof(atom_ctx->vbios_version))) {
+            if (fru_addr)
                 *fru_addr = FRU_EEPROM_MADDR_6;
             return true;
         } else {
@@ -83,6 +84,7 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
                 sizeof(atom_ctx->vbios_version))) {
             return false;
         } else {
+            if (fru_addr)
                 *fru_addr = FRU_EEPROM_MADDR_6;
             return true;
         }
......
@@ -113,7 +113,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
     bp.resv = resv;
     bp.preferred_domain = initial_domain;
     bp.flags = flags;
-    bp.domain = initial_domain | AMDGPU_GEM_DOMAIN_CPU;
+    bp.domain = initial_domain;
     bp.bo_ptr_size = sizeof(struct amdgpu_bo);
     r = amdgpu_bo_create_user(adev, &bp, &ubo);
@@ -332,10 +332,20 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
     }
     initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
     r = amdgpu_gem_object_create(adev, size, args->in.alignment,
-                                 initial_domain, flags, ttm_bo_type_device,
-                                 resv, &gobj);
+                                 initial_domain,
+                                 flags, ttm_bo_type_device, resv, &gobj);
     if (r && r != -ERESTARTSYS) {
+        if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+            flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+            goto retry;
+        }
+
+        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+            initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+            goto retry;
+        }
         DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
                   size, initial_domain, args->in.alignment, r);
     }
......
@@ -105,17 +105,11 @@ static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
                                       unsigned long cur_seq)
 {
     struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
-    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
     if (!mmu_notifier_range_blockable(range))
         return false;
-    mutex_lock(&adev->notifier_lock);
-    mmu_interval_set_seq(mni, cur_seq);
-    amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
-    mutex_unlock(&adev->notifier_lock);
+    amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);
     return true;
 }
@@ -244,9 +238,9 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
     return r;
 }
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
 {
-    int r;
+    bool r;
     r = mmu_interval_read_retry(hmm_range->notifier,
                                 hmm_range->notifier_seq);
......
@@ -29,12 +29,13 @@
 #include <linux/rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                                uint64_t start, uint64_t npages, bool readonly,
                                void *owner, struct page **pages,
                                struct hmm_range **phmm_range);
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 #if defined(CONFIG_HMM_MIRROR)
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
......
@@ -165,6 +165,26 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
         atomic_read(&adev->gpu_reset_counter);
 }
+/* Check if we need to switch to another set of resources */
+static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
+                                          struct amdgpu_job *job)
+{
+    return id->gds_base != job->gds_base ||
+        id->gds_size != job->gds_size ||
+        id->gws_base != job->gws_base ||
+        id->gws_size != job->gws_size ||
+        id->oa_base != job->oa_base ||
+        id->oa_size != job->oa_size;
+}
+
+/* Check if the id is compatible with the job */
+static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
+                                   struct amdgpu_job *job)
+{
+    return id->pd_gpu_addr == job->vm_pd_addr &&
+        !amdgpu_vmid_gds_switch_needed(id, job);
+}
+
 /**
  * amdgpu_vmid_grab_idle - grab idle VMID
  *
@@ -258,14 +278,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 {
     struct amdgpu_device *adev = ring->adev;
     unsigned vmhub = ring->funcs->vmhub;
+    struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     uint64_t fence_context = adev->fence_context + ring->idx;
     bool needs_flush = vm->use_cpu_for_update;
     uint64_t updates = amdgpu_vm_tlb_seq(vm);
     int r;
-    *id = vm->reserved_vmid[vmhub];
+    *id = id_mgr->reserved;
     if ((*id)->owner != vm->immediate.fence_context ||
-        (*id)->pd_gpu_addr != job->vm_pd_addr ||
+        !amdgpu_vmid_compatible(*id, job) ||
         (*id)->flushed_updates < updates ||
         !(*id)->last_flush ||
         ((*id)->last_flush->context != fence_context &&
@@ -294,8 +315,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
     if (r)
         return r;
-    (*id)->flushed_updates = updates;
     job->vm_needs_flush = needs_flush;
+    job->spm_update_needed = true;
     return 0;
 }
@@ -333,7 +354,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
         if ((*id)->owner != vm->immediate.fence_context)
             continue;
-        if ((*id)->pd_gpu_addr != job->vm_pd_addr)
+        if (!amdgpu_vmid_compatible(*id, job))
             continue;
         if (!(*id)->last_flush ||
@@ -355,7 +376,6 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
         if (r)
             return r;
-        (*id)->flushed_updates = updates;
         job->vm_needs_flush |= needs_flush;
         return 0;
     }
@@ -408,22 +428,30 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
             if (r)
                 goto error;
-            id->flushed_updates = amdgpu_vm_tlb_seq(vm);
             job->vm_needs_flush = true;
         }
         list_move_tail(&id->list, &id_mgr->ids_lru);
     }
-    id->pd_gpu_addr = job->vm_pd_addr;
-    id->owner = vm->immediate.fence_context;
+    job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
     if (job->vm_needs_flush) {
+        id->flushed_updates = amdgpu_vm_tlb_seq(vm);
         dma_fence_put(id->last_flush);
         id->last_flush = NULL;
     }
     job->vmid = id - id_mgr->ids;
     job->pasid = vm->pasid;
+    id->gds_base = job->gds_base;
+    id->gds_size = job->gds_size;
+    id->gws_base = job->gws_base;
+    id->gws_size = job->gws_size;
+    id->oa_base = job->oa_base;
+    id->oa_size = job->oa_size;
+    id->pd_gpu_addr = job->vm_pd_addr;
+    id->owner = vm->immediate.fence_context;
+
     trace_amdgpu_vm_grab_id(vm, ring, job);
 error:
@@ -435,31 +463,27 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                unsigned vmhub)
 {
-    struct amdgpu_vmid_mgr *id_mgr;
-    struct amdgpu_vmid *idle;
-    int r = 0;
+    struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-    id_mgr = &adev->vm_manager.id_mgr[vmhub];
     mutex_lock(&id_mgr->lock);
     if (vm->reserved_vmid[vmhub])
         goto unlock;
-    if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-        AMDGPU_VM_MAX_RESERVED_VMID) {
-        DRM_ERROR("Over limitation of reserved vmid\n");
-        atomic_dec(&id_mgr->reserved_vmid_num);
-        r = -EINVAL;
-        goto unlock;
+
+    ++id_mgr->reserved_use_count;
+    if (!id_mgr->reserved) {
+        struct amdgpu_vmid *id;
+
+        id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
+                              list);
+        /* Remove from normal round robin handling */
+        list_del_init(&id->list);
+        id_mgr->reserved = id;
     }
-    /* Select the first entry VMID */
-    idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
-    list_del_init(&idle->list);
-    vm->reserved_vmid[vmhub] = idle;
-    mutex_unlock(&id_mgr->lock);
-    return 0;
+    vm->reserved_vmid[vmhub] = true;
+
 unlock:
     mutex_unlock(&id_mgr->lock);
-    return r;
+    return 0;
 }
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
@@ -469,12 +493,12 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
     struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     mutex_lock(&id_mgr->lock);
-    if (vm->reserved_vmid[vmhub]) {
-        list_add(&vm->reserved_vmid[vmhub]->list,
-                 &id_mgr->ids_lru);
-        vm->reserved_vmid[vmhub] = NULL;
-        atomic_dec(&id_mgr->reserved_vmid_num);
+    if (vm->reserved_vmid[vmhub] &&
+        !--id_mgr->reserved_use_count) {
+        /* give the reserved ID back to normal round robin */
+        list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
     }
+    vm->reserved_vmid[vmhub] = false;
     mutex_unlock(&id_mgr->lock);
 }
@@ -541,7 +565,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
         mutex_init(&id_mgr->lock);
         INIT_LIST_HEAD(&id_mgr->ids_lru);
-        atomic_set(&id_mgr->reserved_vmid_num, 0);
+        id_mgr->reserved_use_count = 0;
         /* manage only VMIDs not used by KFD */
         id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
......
@@ -67,7 +67,8 @@ struct amdgpu_vmid_mgr {
     unsigned num_ids;
     struct list_head ids_lru;
     struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
-    atomic_t reserved_vmid_num;
+    struct amdgpu_vmid *reserved;
+    unsigned int reserved_use_count;
 };
 int amdgpu_pasid_alloc(unsigned int bits);
......
@@ -53,6 +53,8 @@ struct amdgpu_job {
     uint32_t preamble_status;
     uint32_t preemption_status;
     bool vm_needs_flush;
+    bool gds_switch_needed;
+    bool spm_update_needed;
     uint64_t vm_pd_addr;
     unsigned vmid;
     unsigned pasid;
......
@@ -346,17 +346,16 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
  * @adev: amdgpu device object
  * @offset: offset of the BO
  * @size: size of the BO
- * @domain: where to place it
  * @bo_ptr: used to initialize BOs in structures
  * @cpu_addr: optional CPU address mapping
  *
- * Creates a kernel BO at a specific offset in the address space of the domain.
+ * Creates a kernel BO at a specific offset in VRAM.
  *
  * Returns:
  * 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-                               uint64_t offset, uint64_t size, uint32_t domain,
+                               uint64_t offset, uint64_t size,
                                struct amdgpu_bo **bo_ptr, void **cpu_addr)
 {
     struct ttm_operation_ctx ctx = { false, false };
@@ -366,8 +365,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
     offset &= PAGE_MASK;
     size = ALIGN(size, PAGE_SIZE);
-    r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
-                                  NULL, cpu_addr);
+    r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+                                  AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
+                                  cpu_addr);
     if (r)
         return r;
@@ -422,6 +422,8 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
     if (*bo == NULL)
         return;
+    WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);
+
     if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
         if (cpu_addr)
             amdgpu_bo_kunmap(*bo);
@@ -446,27 +448,24 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
     /*
      * If GTT is part of requested domains the check must succeed to
-     * allow fall back to GTT
+     * allow fall back to GTT.
      */
     if (domain & AMDGPU_GEM_DOMAIN_GTT) {
         man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
-        if (size < man->size)
+        if (man && size < man->size)
             return true;
-        else
+        else if (!man)
+            WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
         goto fail;
-    }
-    if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+    } else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
         man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-        if (size < man->size)
+        if (man && size < man->size)
             return true;
-        else
         goto fail;
     }
     /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
     return true;
@@ -581,6 +580,10 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
         bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
     bo->tbo.bdev = &adev->mman.bdev;
+    if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
+                      AMDGPU_GEM_DOMAIN_GDS))
+        amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+    else
         amdgpu_bo_placement_from_domain(bo, bp->domain);
     if (bp->type == ttm_bo_type_kernel)
         bo->tbo.priority = 1;
@@ -1506,7 +1509,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
                                         uint32_t domain)
 {
-    if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+    if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
+        ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
         domain = AMDGPU_GEM_DOMAIN_VRAM;
         if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
             domain = AMDGPU_GEM_DOMAIN_GTT;
......
@@ -284,7 +284,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                             u32 domain, struct amdgpu_bo **bo_ptr,
                             u64 *gpu_addr, void **cpu_addr);
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-                               uint64_t offset, uint64_t size, uint32_t domain,
+                               uint64_t offset, uint64_t size,
                                struct amdgpu_bo **bo_ptr, void **cpu_addr);
 int amdgpu_bo_create_user(struct amdgpu_device *adev,
                           struct amdgpu_bo_param *bp,
......
@@ -695,8 +695,19 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
     return r;
 }
+/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
+ */
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                      struct hmm_range *range)
+{
+    struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+    if (gtt && gtt->userptr && range)
+        amdgpu_hmm_range_get_pages_done(range);
+}
+
 /*
- * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
+ * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
  * Check if the pages backing this ttm range have been invalidated
  *
  * Returns: true if pages are still valid
@@ -714,10 +725,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
     WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
-    /*
-     * FIXME: Must always hold notifier_lock for this, and must
-     * not ignore the return code.
-     */
     return !amdgpu_hmm_range_get_pages_done(range);
 }
 #endif
@@ -1569,7 +1576,6 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
     return amdgpu_bo_create_kernel_at(adev,
                                       adev->mman.fw_vram_usage_start_offset,
                                       adev->mman.fw_vram_usage_size,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->mman.fw_vram_usage_reserved_bo,
                                       &adev->mman.fw_vram_usage_va);
 }
@@ -1595,7 +1601,6 @@ static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
     return amdgpu_bo_create_kernel_at(adev,
                                       adev->mman.drv_vram_usage_start_offset,
                                       adev->mman.drv_vram_usage_size,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->mman.drv_vram_usage_reserved_bo,
                                       &adev->mman.drv_vram_usage_va);
 }
@@ -1676,7 +1681,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
         ret = amdgpu_bo_create_kernel_at(adev,
                                          ctx->c2p_train_data_offset,
                                          ctx->train_data_size,
-                                         AMDGPU_GEM_DOMAIN_VRAM,
                                          &ctx->c2p_bo,
                                          NULL);
         if (ret) {
@@ -1690,7 +1694,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
     ret = amdgpu_bo_create_kernel_at(adev,
                                      adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
                                      adev->mman.discovery_tmr_size,
-                                     AMDGPU_GEM_DOMAIN_VRAM,
                                      &adev->mman.discovery_memory,
                                      NULL);
     if (ret) {
@@ -1791,21 +1794,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
      * avoid display artifacts while transitioning between pre-OS
      * and driver. */
     r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
-                                   AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->mman.stolen_vga_memory,
                                    NULL);
     if (r)
         return r;
     r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
                                    adev->mman.stolen_extended_size,
-                                   AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->mman.stolen_extended_memory,
                                    NULL);
     if (r)
         return r;
     r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
                                    adev->mman.stolen_reserved_size,
-                                   AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->mman.stolen_reserved_memory,
                                    NULL);
     if (r)
......
@@ -159,6 +159,8 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
                                  struct hmm_range **range);
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                      struct hmm_range *range);
 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
                                        struct hmm_range *range);
 #else
@@ -168,6 +170,10 @@ static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
 {
     return -EPERM;
 }
+static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                                    struct hmm_range *range)
+{
+}
 static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
                                                      struct hmm_range *range)
 {
......
@@ -395,7 +395,6 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
          */
         if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
                                        AMDGPU_GPU_PAGE_SIZE,
-                                       AMDGPU_GEM_DOMAIN_VRAM,
                                        &bo, NULL))
             DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
......
@@ -484,25 +484,20 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
     struct amdgpu_device *adev = ring->adev;
     unsigned vmhub = ring->funcs->vmhub;
     struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-    struct amdgpu_vmid *id;
-    bool gds_switch_needed;
-    bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
     if (job->vmid == 0)
         return false;
-    id = &id_mgr->ids[job->vmid];
-    gds_switch_needed = ring->funcs->emit_gds_switch && (
-        id->gds_base != job->gds_base ||
-        id->gds_size != job->gds_size ||
-        id->gws_base != job->gws_base ||
-        id->gws_size != job->gws_size ||
-        id->oa_base != job->oa_base ||
-        id->oa_size != job->oa_size);
-    if (amdgpu_vmid_had_gpu_reset(adev, id))
+
+    if (job->vm_needs_flush || ring->has_compute_vm_bug)
+        return true;
+
+    if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
+        return true;
+
+    if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
         return true;
-    return vm_flush_needed || gds_switch_needed;
+
+    return false;
 }
 /**
@@ -524,27 +519,20 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
     unsigned vmhub = ring->funcs->vmhub;
     struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
-    bool gds_switch_needed = ring->funcs->emit_gds_switch && (
-        id->gds_base != job->gds_base ||
-        id->gds_size != job->gds_size ||
-        id->gws_base != job->gws_base ||
-        id->gws_size != job->gws_size ||
-        id->oa_base != job->oa_base ||
-        id->oa_size != job->oa_size);
+    bool spm_update_needed = job->spm_update_needed;
+    bool gds_switch_needed = ring->funcs->emit_gds_switch &&
+        job->gds_switch_needed;
     bool vm_flush_needed = job->vm_needs_flush;
     struct dma_fence *fence = NULL;
     bool pasid_mapping_needed = false;
     unsigned patch_offset = 0;
-    bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
     int r;
-    if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
-        adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
     if (amdgpu_vmid_had_gpu_reset(adev, id)) {
         gds_switch_needed = true;
         vm_flush_needed = true;
         pasid_mapping_needed = true;
+        spm_update_needed = true;
     }
     mutex_lock(&id_mgr->lock);
@@ -577,6 +565,17 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
     if (pasid_mapping_needed)
         amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
+    if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+        adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
+    if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+        gds_switch_needed) {
+        amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
+                                    job->gds_size, job->gws_base,
+                                    job->gws_size, job->oa_base,
+                                    job->oa_size);
+    }
+
     if (vm_flush_needed || pasid_mapping_needed) {
         r = amdgpu_fence_emit(ring, &fence, NULL, 0);
         if (r)
@@ -601,20 +600,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
     }
     dma_fence_put(fence);
-    if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
-        gds_switch_needed) {
-        id->gds_base = job->gds_base;
-        id->gds_size = job->gds_size;
-        id->gws_base = job->gws_base;
-        id->gws_size = job->gws_size;
-        id->oa_base = job->oa_base;
-        id->oa_size = job->oa_size;
-        amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
-                                    job->gds_size, job->gws_base,
-                                    job->gws_size, job->oa_base,
-                                    job->oa_size);
-    }
     if (ring->funcs->patch_cond_exec)
         amdgpu_ring_patch_cond_exec(ring, patch_offset);
@@ -2383,7 +2368,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     union drm_amdgpu_vm *args = data;
     struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_fpriv *fpriv = filp->driver_priv;
-    long timeout = msecs_to_jiffies(2000);
     int r;
     switch (args->in.op) {
@@ -2395,21 +2379,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
             return r;
         break;
     case AMDGPU_VM_OP_UNRESERVE_VMID:
-        if (amdgpu_sriov_runtime(adev))
-            timeout = 8 * timeout;
-        /* Wait vm idle to make sure the vmid set in SPM_VMID is
-         * not referenced anymore.
-         */
-        r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
-        if (r)
-            return r;
-        r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
-        if (r < 0)
-            return r;
-        amdgpu_bo_unreserve(fpriv->vm.root.bo);
         amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
         break;
     default:
......
@@ -119,9 +119,6 @@ struct amdgpu_bo_vm;
 /* Reserve 2MB at top/bottom of address space for kernel use */
 #define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
-/* max vmids dedicated for process */
-#define AMDGPU_VM_MAX_RESERVED_VMID 1
 /* See vm_update_mode */
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
@@ -298,8 +295,7 @@ struct amdgpu_vm {
     struct dma_fence *last_unlocked;
     unsigned int pasid;
-    /* dedicated to vm */
-    struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
+    bool reserved_vmid[AMDGPU_MAX_VMHUBS];
     /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
     bool use_cpu_for_update;
......
@@ -1185,6 +1185,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
                                 struct amdgpu_bo_va_mapping *mapping,
                                 uint64_t *flags)
 {
+    struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+
     *flags &= ~AMDGPU_PTE_EXECUTABLE;
     *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@@ -1196,7 +1198,7 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
         *flags &= ~AMDGPU_PTE_VALID;
     }
-    if (mapping->bo_va->base.bo)
+    if (bo && bo->tbo.resource)
         gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
                                      mapping, flags);
 }
......
@@ -1503,6 +1503,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
         case IP_VERSION(3, 0, 1):
         case IP_VERSION(3, 1, 2):
         case IP_VERSION(3, 1, 3):
+        case IP_VERSION(3, 1, 4):
         case IP_VERSION(3, 1, 5):
         case IP_VERSION(3, 1, 6):
             init_data.flags.gpu_vm_support = true;
......
@@ -55,7 +55,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
         s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
         s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
     }
-    s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+    s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
     s = &wm->sets[1];
     s->wm_set = 1;
@@ -65,7 +65,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
         s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
         s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
     }
-    s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+    s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
     s = &wm->sets[2];
     s->wm_set = 2;
@@ -75,7 +75,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
         s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
         s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
     }
-    s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+    s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
     s = &wm->sets[3];
     s->wm_set = 3;
@@ -85,7 +85,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
         s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
         s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
     }
-    s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+    s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
 }
 void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
......
@@ -159,7 +159,7 @@ static void dcn10_log_hubbub_state(struct dc *dc,
         DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
         DTN_INFO_MICRO_SEC(s->sr_enter);
         DTN_INFO_MICRO_SEC(s->sr_exit);
-        DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
+        DTN_INFO_MICRO_SEC(s->dram_clk_change);
         DTN_INFO("\n");
     }
......
@@ -83,7 +83,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
     memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
     dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
-    chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n");
+    chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_change\n");
     remaining_buffer -= chars_printed;
     pBuf += chars_printed;
@@ -98,7 +98,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
             (s->pte_meta_urgent * frac) / ref_clk_mhz / frac, (s->pte_meta_urgent * frac) / ref_clk_mhz % frac,
             (s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter * frac) / ref_clk_mhz % frac,
             (s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * frac) / ref_clk_mhz % frac,
-            (s->dram_clk_chanage * frac) / ref_clk_mhz / frac, (s->dram_clk_chanage * frac) / ref_clk_mhz % frac);
+            (s->dram_clk_change * frac) / ref_clk_mhz / frac, (s->dram_clk_change * frac) / ref_clk_mhz % frac);
         remaining_buffer -= chars_printed;
         pBuf += chars_printed;
     }
......
@@ -500,7 +500,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
         s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
         s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
     }
-    s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+    s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
     s = &wm->sets[1];
     s->wm_set = 1;
@@ -511,7 +511,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
         s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
         s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
     }
-    s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+    s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
     s = &wm->sets[2];
     s->wm_set = 2;
@@ -522,7 +522,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
         s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
         s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
     }
-    s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+    s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
     s = &wm->sets[3];
     s->wm_set = 3;
@@ -533,7 +533,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
         s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
         s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
     }
-    s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+    s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
 }
 void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
......
@@ -635,7 +635,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
             DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
     REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
-            DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);
+            DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change);
     s = &wm->sets[1];
     s->wm_set = 1;
@@ -649,7 +649,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
             DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
     REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
-            DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);
+            DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change);
     s = &wm->sets[2];
     s->wm_set = 2;
@@ -663,7 +663,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
             DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
     REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
-            DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);
+            DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change);
     s = &wm->sets[3];
     s->wm_set = 3;
@@ -677,7 +677,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
             DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
     REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
-            DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
+            DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change);
 }
 static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
......
@@ -865,7 +865,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
             DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
     REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
-            DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_chanage);
+            DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);
     REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
             DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);
@@ -885,7 +885,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
             DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
     REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
-            DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_chanage);
+            DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);
     REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
             DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);
@@ -905,7 +905,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
             DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
     REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
-            DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_chanage);
+            DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change);
     REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
             DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);
@@ -925,7 +925,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
             DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
     REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
-            DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
+            DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change);
     REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
             DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);
......
@@ -46,7 +46,7 @@ struct dcn_hubbub_wm_set {
     uint32_t pte_meta_urgent;
     uint32_t sr_enter;
     uint32_t sr_exit;
-    uint32_t dram_clk_chanage;
+    uint32_t dram_clk_change;
     uint32_t usr_retrain;
     uint32_t fclk_pstate_change;
 };
......
@@ -241,7 +241,8 @@
     __SMU_DUMMY_MAP(GetGfxOffEntryCount), \
     __SMU_DUMMY_MAP(LogGfxOffResidency), \
     __SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired), \
-    __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel),
+    __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
+    __SMU_DUMMY_MAP(AllowGpo),
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type) SMU_MSG_##type
......
@@ -272,6 +272,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu);
 int smu_v13_0_run_btc(struct smu_context *smu);
+int smu_v13_0_gpo_control(struct smu_context *smu,
+                          bool enablement);
+
 int smu_v13_0_deep_sleep_control(struct smu_context *smu,
                                  bool enablement);
......
@@ -2180,6 +2180,21 @@ int smu_v13_0_run_btc(struct smu_context *smu)
     return res;
 }
+int smu_v13_0_gpo_control(struct smu_context *smu,
+                          bool enablement)
+{
+    int res;
+
+    res = smu_cmn_send_smc_msg_with_param(smu,
+                                          SMU_MSG_AllowGpo,
+                                          enablement ? 1 : 0,
+                                          NULL);
+    if (res)
+        dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
+
+    return res;
+}
+
 int smu_v13_0_deep_sleep_control(struct smu_context *smu,
                                  bool enablement)
 {
......
...@@ -144,6 +144,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = ...@@ -144,6 +144,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0), MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0),
MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel, MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0), PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
}; };
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
...@@ -210,6 +211,8 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = ...@@ -210,6 +211,8 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(MEM_TEMP_READ), FEA_MAP(MEM_TEMP_READ),
FEA_MAP(ATHUB_MMHUB_PG), FEA_MAP(ATHUB_MMHUB_PG),
FEA_MAP(SOC_PCC), FEA_MAP(SOC_PCC),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
}; };
static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
...@@ -540,6 +543,23 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) ...@@ -540,6 +543,23 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table); dpm_table);
if (ret) if (ret)
return ret; return ret;
/*
* Update the reported maximum shader clock to the value
* which can be guarded to be achieved on all cards. This
* is aligned with Window setting. And considering that value
* might be not the peak frequency the card can achieve, it
* is normal some real-time clock frequency can overtake this
* labelled maximum clock frequency(for example in pp_dpm_sclk
* sysfs output).
*/
if (skutable->DriverReportedClocks.GameClockAc &&
(dpm_table->dpm_levels[dpm_table->count - 1].value >
skutable->DriverReportedClocks.GameClockAc)) {
dpm_table->dpm_levels[dpm_table->count - 1].value =
skutable->DriverReportedClocks.GameClockAc;
dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
}
} else { } else {
dpm_table->count = 1; dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
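The new block above only lowers the top gfxclk DPM level when the pptable reports a nonzero GameClockAc below it. A small stand-alone sketch of that clamping rule, with made-up frequencies:

/*
 * Stand-alone sketch of the GameClockAc clamp applied to the top
 * gfxclk DPM level; the values are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dpm_levels[] = { 500, 1200, 2600 };	/* MHz, example */
	uint32_t count = 3;
	uint32_t dpm_max = dpm_levels[count - 1];
	uint32_t game_clock_ac = 2500;			/* 0 means "not set" */

	/* Only clamp when the pptable reports a value below the top level. */
	if (game_clock_ac && dpm_levels[count - 1] > game_clock_ac) {
		dpm_levels[count - 1] = game_clock_ac;
		dpm_max = game_clock_ac;
	}

	printf("reported max gfxclk: %u MHz\n", dpm_max);
	return 0;
}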
...@@ -802,6 +822,57 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, ...@@ -802,6 +822,57 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
return ret; return ret;
} }
static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
uint32_t *max)
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
struct smu_13_0_dpm_table *dpm_table;
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
/* uclk dpm table */
dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
/* gfxclk dpm table */
dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
/* socclk dpm table */
dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
/* fclk dpm table */
dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
case SMU_VCLK1:
/* vclk dpm table */
dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
case SMU_DCLK1:
/* dclk dpm table */
dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
dev_err(smu->adev->dev, "Unsupported clock type!\n");
return -EINVAL;
}
if (min)
*min = dpm_table->min;
if (max)
*max = dpm_table->max;
return 0;
}
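smu_v13_0_0_get_dpm_ultimate_freq() above simply selects the cached DPM table for the requested clock type and reports its min/max, with both output pointers optional. A minimal stand-alone sketch of that pattern follows; the types and clock identifiers are simplified stand-ins, not the driver's.

/*
 * Stand-alone sketch of the "pick a DPM table, report its min/max"
 * pattern used above.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct dpm_table { uint32_t min, max; };

enum clk_type { CLK_GFX, CLK_UCLK, CLK_SOC };

struct dpm_context {
	struct dpm_table gfx_table;
	struct dpm_table uclk_table;
	struct dpm_table soc_table;
};

static int get_ultimate_freq(const struct dpm_context *ctx,
			     enum clk_type clk,
			     uint32_t *min, uint32_t *max)
{
	const struct dpm_table *t;

	switch (clk) {
	case CLK_GFX:	t = &ctx->gfx_table;	break;
	case CLK_UCLK:	t = &ctx->uclk_table;	break;
	case CLK_SOC:	t = &ctx->soc_table;	break;
	default:	return -EINVAL;
	}

	/* Both output pointers are optional, as in the driver code. */
	if (min)
		*min = t->min;
	if (max)
		*max = t->max;
	return 0;
}

int main(void)
{
	struct dpm_context ctx = {
		.gfx_table = { .min = 500, .max = 2500 },	/* example MHz */
	};
	uint32_t lo = 0, hi = 0;

	if (!get_ultimate_freq(&ctx, CLK_GFX, &lo, &hi))
		printf("gfxclk range: %u-%u MHz\n", lo, hi);
	return 0;
}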
static int smu_v13_0_0_read_sensor(struct smu_context *smu, static int smu_v13_0_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, enum amd_pp_sensors sensor,
void *data, void *data,
...@@ -1304,8 +1375,16 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu) ...@@ -1304,8 +1375,16 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.fclk_table; &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table = struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table; &smu->pstate_table;
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
DriverReportedClocks_t driver_clocks =
pptable->SkuTable.DriverReportedClocks;
pstate_table->gfxclk_pstate.min = gfx_table->min; pstate_table->gfxclk_pstate.min = gfx_table->min;
if (driver_clocks.GameClockAc &&
(driver_clocks.GameClockAc < gfx_table->max))
pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
else
pstate_table->gfxclk_pstate.peak = gfx_table->max; pstate_table->gfxclk_pstate.peak = gfx_table->max;
pstate_table->uclk_pstate.min = mem_table->min; pstate_table->uclk_pstate.min = mem_table->min;
...@@ -1323,12 +1402,12 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu) ...@@ -1323,12 +1402,12 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
pstate_table->fclk_pstate.min = fclk_table->min; pstate_table->fclk_pstate.min = fclk_table->min;
pstate_table->fclk_pstate.peak = fclk_table->max; pstate_table->fclk_pstate.peak = fclk_table->max;
/*
 * For now, just use the mininum clock frequency.
 * TODO: update them when the real pstate settings available
 */
pstate_table->gfxclk_pstate.standard = gfx_table->min;
pstate_table->uclk_pstate.standard = mem_table->min;
if (driver_clocks.BaseClockAc &&
    driver_clocks.BaseClockAc < gfx_table->max)
	pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
else
	pstate_table->gfxclk_pstate.standard = gfx_table->max;
pstate_table->uclk_pstate.standard = mem_table->max;
pstate_table->socclk_pstate.standard = soc_table->min; pstate_table->socclk_pstate.standard = soc_table->min;
pstate_table->vclk_pstate.standard = vclk_table->min; pstate_table->vclk_pstate.standard = vclk_table->min;
pstate_table->dclk_pstate.standard = dclk_table->min; pstate_table->dclk_pstate.standard = dclk_table->min;
...@@ -1899,7 +1978,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { ...@@ -1899,7 +1978,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_enabled_mask = smu_cmn_get_enabled_mask, .get_enabled_mask = smu_cmn_get_enabled_mask,
.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable, .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable, .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.read_sensor = smu_v13_0_0_read_sensor, .read_sensor = smu_v13_0_0_read_sensor,
.feature_is_enabled = smu_cmn_feature_is_enabled, .feature_is_enabled = smu_cmn_feature_is_enabled,
...@@ -1947,6 +2026,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { ...@@ -1947,6 +2026,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.set_df_cstate = smu_v13_0_0_set_df_cstate, .set_df_cstate = smu_v13_0_0_set_df_cstate,
.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num, .send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag, .send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
.gpo_control = smu_v13_0_gpo_control,
}; };
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
......
...@@ -123,6 +123,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = ...@@ -123,6 +123,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
}; };
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
...@@ -189,6 +190,8 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = ...@@ -189,6 +190,8 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(MEM_TEMP_READ), FEA_MAP(MEM_TEMP_READ),
FEA_MAP(ATHUB_MMHUB_PG), FEA_MAP(ATHUB_MMHUB_PG),
FEA_MAP(SOC_PCC), FEA_MAP(SOC_PCC),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
}; };
static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
...@@ -1688,6 +1691,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { ...@@ -1688,6 +1691,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.mode1_reset = smu_v13_0_mode1_reset, .mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state, .set_mp1_state = smu_v13_0_7_set_mp1_state,
.set_df_cstate = smu_v13_0_7_set_df_cstate, .set_df_cstate = smu_v13_0_7_set_df_cstate,
.gpo_control = smu_v13_0_gpo_control,
}; };
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
......