Commit e61b2ad3 authored by Linus Torvalds

Merge tag 'drm-fixes-2021-09-24' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Quiet week this week, just some i915 and amd fixes, just getting ready
  for my all nighter maintainer summit!

  Summary:

  i915:
   - Fix ADL-P memory bandwidth parameters
   - Fix memory corruption due to a double free
   - Fix memory leak in DMC firmware handling

  amdgpu:
   - Update MAINTAINERS entry for powerplay
   - Fix empty macros
   - SI DPM fix

  amdkfd:
   - SVM fixes
   - DMA mapping fix"

* tag 'drm-fixes-2021-09-24' of git://anongit.freedesktop.org/drm/drm:
  drm/amdkfd: fix svm_migrate_fini warning
  drm/amdkfd: handle svm migrate init error
  drm/amd/pm: Update intermediate power state for SI
  drm/amdkfd: fix dma mapping leaking warning
  drm/amdkfd: SVM map to gpus check vma boundary
  MAINTAINERS: fix up entry for AMD Powerplay
  drm/amd/display: fix empty debug macros
  drm/i915: Free all DMC payloads
  drm/i915: Move __i915_gem_free_object to ttm_bo_destroy
  drm/i915: Update memory bandwidth parameters
parents f9e36107 ef88d7a8
@@ -977,12 +977,12 @@ L: platform-driver-x86@vger.kernel.org
 S: Maintained
 F: drivers/platform/x86/amd-pmc.*
 
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
 M: Evan Quan <evan.quan@amd.com>
 L: amd-gfx@lists.freedesktop.org
 S: Supported
 T: git https://gitlab.freedesktop.org/agd5f/linux.git
-F: drivers/gpu/drm/amd/pm/powerplay/
+F: drivers/gpu/drm/amd/pm/
 
 AMD PTDMA DRIVER
 M: Sanjay R Mehta <sanju.mehta@amd.com>
...
@@ -971,7 +971,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
         if (kfd->init_complete) {
-                svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
                 device_queue_manager_uninit(kfd->dqm);
                 kfd_interrupt_exit(kfd);
                 kfd_topology_remove_device(kfd);
...
@@ -891,9 +891,16 @@ int svm_migrate_init(struct amdgpu_device *adev)
         pgmap->ops = &svm_migrate_pgmap_ops;
         pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
         pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+
+        /* Device manager releases device-specific resources, memory region and
+         * pgmap when driver disconnects from device.
+         */
         r = devm_memremap_pages(adev->dev, pgmap);
         if (IS_ERR(r)) {
                 pr_err("failed to register HMM device memory\n");
+
+                /* Disable SVM support capability */
+                pgmap->type = 0;
                 devm_release_mem_region(adev->dev, res->start,
                                         res->end - res->start + 1);
                 return PTR_ERR(r);
@@ -908,12 +915,3 @@ int svm_migrate_init(struct amdgpu_device *adev)
 
         return 0;
 }
-
-void svm_migrate_fini(struct amdgpu_device *adev)
-{
-        struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap;
-
-        devm_memunmap_pages(adev->dev, pgmap);
-        devm_release_mem_region(adev->dev, pgmap->range.start,
-                                pgmap->range.end - pgmap->range.start + 1);
-}
...
@@ -47,7 +47,6 @@ unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
 
 int svm_migrate_init(struct amdgpu_device *adev);
-void svm_migrate_fini(struct amdgpu_device *adev);
 
 #else
@@ -55,10 +54,6 @@ static inline int svm_migrate_init(struct amdgpu_device *adev)
 {
         return 0;
 }
-
-static inline void svm_migrate_fini(struct amdgpu_device *adev)
-{
-        /* empty */
-}
 
 #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
...
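Note on the two removals above: dropping svm_migrate_fini() is safe because devm_memremap_pages() and devm_request_mem_region() are device-managed, so the driver core releases the pgmap and the memory region automatically when the driver detaches. A minimal sketch of that devm pattern, with a hypothetical driver and addresses (not the amdkfd code):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* Hypothetical probe routine: every devm_* resource below is tied to
 * dev's lifetime, so there is no matching cleanup in a remove() path;
 * the driver core unwinds the resources in reverse order on unbind. */
static int example_probe(struct device *dev)
{
        struct resource *res;
        void __iomem *regs;

        /* Region is released automatically when dev goes away. */
        res = devm_request_mem_region(dev, 0xfe000000, 0x1000, "example");
        if (!res)
                return -EBUSY;

        /* The mapping is likewise torn down by the device manager. */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs)
                return -ENOMEM;

        return 0;
}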
@@ -118,6 +118,13 @@ static void svm_range_remove_notifier(struct svm_range *prange)
                 mmu_interval_notifier_remove(&prange->notifier);
 }
 
+static bool
+svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
+{
+        return dma_addr && !dma_mapping_error(dev, dma_addr) &&
+               !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
+}
+
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                       unsigned long offset, unsigned long npages,
@@ -139,8 +146,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
         addr += offset;
         for (i = 0; i < npages; i++) {
-                if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
-                              "leaking dma mapping\n"))
+                if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
                         dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
 
                 page = hmm_pfn_to_page(hmm_pfns[i]);
@@ -209,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
                 return;
 
         for (i = offset; i < offset + npages; i++) {
-                if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+                if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
                         continue;
                 pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
                 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
@@ -1165,7 +1171,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         unsigned long last_start;
         int last_domain;
         int r = 0;
-        int64_t i;
+        int64_t i, j;
 
         last_start = prange->start + offset;
@@ -1178,7 +1184,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         for (i = offset; i < offset + npages; i++) {
                 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
                 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
-                if ((prange->start + i) < prange->last &&
+
+                /* Collect all pages in the same address range and memory domain
+                 * that can be mapped with a single call to update mapping.
+                 */
+                if (i < offset + npages - 1 &&
                     last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
                         continue;
@@ -1201,6 +1211,10 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                          NULL, dma_addr,
                                          &vm->last_update,
                                          &table_freed);
+
+                for (j = last_start - prange->start; j <= i; j++)
+                        dma_addr[j] |= last_domain;
+
                 if (r) {
                         pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
                         goto out;
...
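Note on the helper above: svm_is_valid_dma_mapping_addr() centralizes the test for "this entry is a real system-memory DMA mapping": non-zero, not a DMA error cookie, and not tagged with SVM_RANGE_VRAM_DOMAIN. VRAM pages are never dma_map_page()'d, so unmapping their tagged entries was the source of the spurious "leaking dma mapping" warning. A hedged sketch of the teardown pattern, not the driver's exact code (identifiers mirror the diff above; the helper is the one added there):

#include <linux/dma-mapping.h>

/* Sketch: release only entries that are genuine DMA mappings. */
static void example_unmap_addrs(struct device *dev, dma_addr_t *addr,
                                unsigned long npages,
                                enum dma_data_direction dir)
{
        unsigned long i;

        for (i = 0; i < npages; i++) {
                /* Skip empty slots, error cookies and VRAM-domain entries. */
                if (!svm_is_valid_dma_mapping_addr(dev, addr[i]))
                        continue;
                dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
                addr[i] = 0;
        }
}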
@@ -42,7 +42,7 @@
 #define DC_LOGGER \
         engine->ctx->logger
 
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
 #define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
 #define LOG_FLAG_Error_I2cAux LOG_ERROR
 #define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX
@@ -76,7 +76,7 @@ enum {
 #define DEFAULT_AUX_ENGINE_MULT 0
 #define DEFAULT_AUX_ENGINE_LENGTH 69
 
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
 
 static void release_engine(
         struct dce_aux *engine)
...
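Note on the empty-macro fix above: a macro that expands to nothing leaves a bare ';' at each call site, which compilers flag (e.g. with -Wempty-body) and which makes single-statement if bodies fragile; 'do { } while (0)' is the canonical empty statement that behaves like a function call everywhere. A standalone illustration (hypothetical macro names):

#include <stdio.h>

/* Expands to nothing: `if (err) TRACE_BAD("x");` becomes `if (err) ;`,
 * a null statement that -Wempty-body warns about. */
#define TRACE_BAD(...)  /* do nothing */

/* Expands to a single empty statement: requires the trailing `;`,
 * nests cleanly in if/else, and compiles away entirely. */
#define TRACE_GOOD(...) do { } while (0)

int main(void)
{
        int err = 1;

        if (err)
                TRACE_GOOD("error %d\n", err);  /* safe as the sole body */
        else
                puts("ok");

        return 0;
}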
@@ -6867,6 +6867,8 @@ static int si_dpm_enable(struct amdgpu_device *adev)
         si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
         si_thermal_start_thermal_controller(adev);
 
+        ni_update_current_ps(adev, boot_ps);
+
         return 0;
 }
...
@@ -222,31 +222,42 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
 struct intel_sa_info {
         u16 displayrtids;
-        u8 deburst, deprogbwlimit;
+        u8 deburst, deprogbwlimit, derating;
 };
 
 static const struct intel_sa_info icl_sa_info = {
         .deburst = 8,
         .deprogbwlimit = 25, /* GB/s */
         .displayrtids = 128,
+        .derating = 10,
 };
 
 static const struct intel_sa_info tgl_sa_info = {
         .deburst = 16,
         .deprogbwlimit = 34, /* GB/s */
         .displayrtids = 256,
+        .derating = 10,
 };
 
 static const struct intel_sa_info rkl_sa_info = {
         .deburst = 16,
         .deprogbwlimit = 20, /* GB/s */
         .displayrtids = 128,
+        .derating = 10,
 };
 
 static const struct intel_sa_info adls_sa_info = {
         .deburst = 16,
         .deprogbwlimit = 38, /* GB/s */
         .displayrtids = 256,
+        .derating = 10,
+};
+
+static const struct intel_sa_info adlp_sa_info = {
+        .deburst = 16,
+        .deprogbwlimit = 38, /* GB/s */
+        .displayrtids = 256,
+        .derating = 20,
 };
 
 static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
@@ -302,7 +313,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
                         bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
 
                         bi->deratedbw[j] = min(maxdebw,
-                                               bw * 9 / 10); /* 90% */
+                                               bw * (100 - sa->derating) / 100);
 
                         drm_dbg_kms(&dev_priv->drm,
                                     "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
@@ -400,7 +411,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
         if (IS_DG2(dev_priv))
                 dg2_get_bw_info(dev_priv);
-        else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+        else if (IS_ALDERLAKE_P(dev_priv))
+                icl_get_bw_info(dev_priv, &adlp_sa_info);
+        else if (IS_ALDERLAKE_S(dev_priv))
                 icl_get_bw_info(dev_priv, &adls_sa_info);
         else if (IS_ROCKETLAKE(dev_priv))
                 icl_get_bw_info(dev_priv, &rkl_sa_info);
...
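Note on the derating change above: the hard-coded 90% factor becomes per-platform, deratedbw = bw * (100 - derating) / 100, so ICL/TGL/RKL/ADL-S keep their 90% factor (derating = 10) while ADL-P derates to 80% (derating = 20). A quick arithmetic check with the same integer formula (the raw bandwidth value is illustrative, not from the table):

#include <stdio.h>

/* Same integer formula as the diff: bw * (100 - derating) / 100. */
static unsigned int derated_bw(unsigned int bw, unsigned int derating)
{
        return bw * (100 - derating) / 100;
}

int main(void)
{
        /* derating = 10 reproduces the old fixed `bw * 9 / 10`. */
        printf("90%%: %u\n", derated_bw(38000, 10));    /* prints 34200 */
        /* ADL-P now derates harder: 80% of raw bandwidth. */
        printf("80%%: %u\n", derated_bw(38000, 20));    /* prints 30400 */
        return 0;
}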
@@ -805,11 +805,14 @@ void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
  */
 void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
 {
+        int id;
+
         if (!HAS_DMC(dev_priv))
                 return;
 
         intel_dmc_ucode_suspend(dev_priv);
         drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
 
-        kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload);
+        for (id = 0; id < DMC_FW_MAX; id++)
+                kfree(dev_priv->dmc.dmc_info[id].payload);
 }
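Note on the leak fix above: intel_dmc_ucode_fini() used to free only the DMC_FW_MAIN payload, while the dmc_info[] array also carries other firmware slots; every slot up to DMC_FW_MAX must be freed or the remaining payloads leak. The generic shape of that fix, with illustrative identifiers (not the i915 code):

#include <linux/slab.h>

/* Freeing one slot of an array of independent allocations leaks the
 * rest; walk the whole array instead. kfree(NULL) is a no-op, so
 * never-loaded slots are harmless. */
static void example_free_payloads(void **payload, int count)
{
        int id;

        for (id = 0; id < count; id++) {
                kfree(payload[id]);
                payload[id] = NULL;     /* guard against double free */
        }
}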
@@ -356,11 +356,8 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
         struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
-        if (likely(obj)) {
-                /* This releases all gem object bindings to the backend. */
+        if (likely(obj))
                 i915_ttm_free_cached_io_st(obj);
-                __i915_gem_free_object(obj);
-        }
 }
 
 static struct intel_memory_region *
@@ -875,8 +872,12 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 {
         struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
+        /* This releases all gem object bindings to the backend. */
+        __i915_gem_free_object(obj);
+
         i915_gem_object_release_memory_region(obj);
         mutex_destroy(&obj->ttm.get_io_page.lock);
+
         if (obj->ttm.created)
                 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 }
...