Commit bb61cf46 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.10-2024-05-30' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

Merge tag 'amd-drm-fixes-6.10-2024-05-30' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.10-2024-05-30:

amdgpu:
- RAS fix
- Fix colorspace property for MST connectors
- Fix for PCIe DPM
- Silence UBSAN warning
- GPUVM robustness fix
- Partition fix
- Drop deprecated I2C_CLASS_SPD

amdkfd:
- Revert unused changes for certain 11.0.3 devices
- Simplify APU VRAM handling
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240530202316.2246826-1-alexander.deucher@amd.com
parents c301c3d2 67c7d4fa
...@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, ...@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
return -EINVAL; return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id); vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) { if (adev->flags & AMD_IS_APU) {
system_mem_needed = size; system_mem_needed = size;
ttm_mem_needed = size; ttm_mem_needed = size;
} }
...@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, ...@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
if (adev && xcp_id >= 0) { if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed; adev->kfd.vram_used[xcp_id] += vram_needed;
adev->kfd.vram_used_aligned[xcp_id] += adev->kfd.vram_used_aligned[xcp_id] +=
(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ? (adev->flags & AMD_IS_APU) ?
vram_needed : vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
} }
...@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, ...@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
if (adev) { if (adev) {
adev->kfd.vram_used[xcp_id] -= size; adev->kfd.vram_used[xcp_id] -= size;
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) { if (adev->flags & AMD_IS_APU) {
adev->kfd.vram_used_aligned[xcp_id] -= size; adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size; kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size; kfd_mem_limit.ttm_mem_used -= size;
...@@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, ...@@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is * if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device * allowed for both small and large BAR configurations of peer device
*/ */
if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) && if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
...@@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, ...@@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
- atomic64_read(&adev->vram_pin_size) - atomic64_read(&adev->vram_pin_size)
- reserved_for_pt; - reserved_for_pt;
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) { if (adev->flags & AMD_IS_APU) {
system_mem_available = no_system_mem_limit ? system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit : kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit - kfd_mem_limit.max_system_mem_limit -
...@@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ...@@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) { if (adev->flags & AMD_IS_APU) {
domain = AMDGPU_GEM_DOMAIN_GTT; domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0; alloc_flags = 0;
...@@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( ...@@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (size) { if (size) {
if (!is_imported && if (!is_imported &&
(mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) && ((adev->flags & AMD_IS_APU) &&
mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size; *size = bo_size;
else else
...@@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev, ...@@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev,
(*mem)->bo = bo; (*mem)->bo = bo;
(*mem)->va = va; (*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
!(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ? !(adev->flags & AMD_IS_APU) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0; (*mem)->mapped_to_gpu_memory = 0;
......
...@@ -5944,6 +5944,7 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev, ...@@ -5944,6 +5944,7 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*speed = PCI_SPEED_UNKNOWN; *speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN;
if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
while ((parent = pci_upstream_bridge(parent))) { while ((parent = pci_upstream_bridge(parent))) {
/* skip upstream/downstream switches internal to dGPU*/ /* skip upstream/downstream switches internal to dGPU*/
if (parent->vendor == PCI_VENDOR_ID_ATI) if (parent->vendor == PCI_VENDOR_ID_ATI)
...@@ -5952,6 +5953,10 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev, ...@@ -5952,6 +5953,10 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*width = pcie_get_width_cap(parent); *width = pcie_get_width_cap(parent);
break; break;
} }
} else {
/* use the current speeds rather than max if switching is not supported */
pcie_bandwidth_available(adev->pdev, NULL, speed, width);
}
} }
/** /**
......
...@@ -46,7 +46,7 @@ struct amdgpu_iv_entry; ...@@ -46,7 +46,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7) #define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7)
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8) #define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11) #define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 13, 13) #define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31) #define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000 #define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000
......
...@@ -706,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params, ...@@ -706,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_bo_base *entry) struct amdgpu_vm_bo_base *entry)
{ {
struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry); struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
struct amdgpu_bo *bo = parent->bo, *pbo; struct amdgpu_bo *bo, *pbo;
struct amdgpu_vm *vm = params->vm; struct amdgpu_vm *vm = params->vm;
uint64_t pde, pt, flags; uint64_t pde, pt, flags;
unsigned int level; unsigned int level;
if (WARN_ON(!parent))
return -EINVAL;
bo = parent->bo;
for (level = 0, pbo = bo->parent; pbo; ++level) for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent; pbo = pbo->parent;
......
...@@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr) ...@@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->gmc.num_mem_partitions == num_xcc / 2) if (adev->gmc.num_mem_partitions == num_xcc / 2)
return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE : return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
AMDGPU_QPX_PARTITION_MODE; AMDGPU_CPX_PARTITION_MODE;
if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU)) if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
return AMDGPU_DPX_PARTITION_MODE; return AMDGPU_DPX_PARTITION_MODE;
......
...@@ -408,13 +408,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) ...@@ -408,13 +408,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v11_kfd2kgd; f2g = &gfx_v11_kfd2kgd;
break; break;
case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 3):
if ((adev->pdev->device == 0x7460 &&
adev->pdev->revision == 0x00) ||
(adev->pdev->device == 0x7461 &&
adev->pdev->revision == 0x00))
/* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
gfx_target_version = 110005;
else
/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
gfx_target_version = 110001; gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd; f2g = &gfx_v11_kfd2kgd;
......
...@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev) ...@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1)) if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL; return -EINVAL;
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
return 0; return 0;
pgmap = &kfddev->pgmap; pgmap = &kfddev->pgmap;
......
...@@ -2619,8 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange, ...@@ -2619,8 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1; return -1;
} }
if (node->adev->gmc.is_app_apu || if (node->adev->flags & AMD_IS_APU)
node->adev->flags & AMD_IS_APU)
return 0; return 0;
if (prange->preferred_loc == gpuid || if (prange->preferred_loc == gpuid ||
...@@ -3338,8 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange) ...@@ -3338,8 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
goto out; goto out;
} }
if (bo_node->adev->gmc.is_app_apu || if (bo_node->adev->flags & AMD_IS_APU) {
bo_node->adev->flags & AMD_IS_APU) {
best_loc = 0; best_loc = 0;
goto out; goto out;
} }
......
...@@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s ...@@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
* is initialized to not 0 when page migration register device memory. * is initialized to not 0 when page migration register device memory.
*/ */
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\ #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
(adev)->gmc.is_app_apu ||\
((adev)->flags & AMD_IS_APU)) ((adev)->flags & AMD_IS_APU))
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
......
...@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, ...@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
&connector->base, &connector->base,
dev->mode_config.tile_property, dev->mode_config.tile_property,
0); 0);
connector->colorspace_property = master->base.colorspace_property;
if (connector->colorspace_property)
drm_connector_attach_colorspace_property(connector);
drm_connector_set_path_property(connector, pathprop); drm_connector_set_path_property(connector, pathprop);
......
...@@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4 ...@@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
uint8_t phase_delay_us; // phase delay in unit of micro second uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved; uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value uint32_t gpio_mask_val; // GPIO Mask value
struct atom_voltage_gpio_map_lut voltage_gpio_lut[1]; struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
}; };
struct atom_svid2_voltage_object_v4 struct atom_svid2_voltage_object_v4
......
...@@ -1562,7 +1562,6 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) ...@@ -1562,7 +1562,6 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i; smu_i2c->port = i;
mutex_init(&smu_i2c->mutex); mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE; control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev; control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v14_0_2_i2c_algo; control->algo = &smu_v14_0_2_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment