Commit 6cfd7775 authored by Dave Airlie

Merge branch 'drm-next-4.5' of git://people.freedesktop.org/~agd5f/linux into drm-next

A few more misc things for radeon and amdgpu for 4.5:
- TTM fixes for imported buffers
- amdgpu fixes to avoid -ENOMEM in CS ioctl
- CZ UVD and VCE clock force options for debugging video issues
- A couple of ACP prerequisites
- Misc fixes

* 'drm-next-4.5' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: validate duplicates first
  drm/amdgpu: move VM page tables to the LRU end on CS v2
  drm/ttm: add ttm_bo_move_to_lru_tail function v2
  drm/ttm: fix adding foreign BOs to the swap LRU
  drm/ttm: fix adding foreign BOs to the LRU during init v2
  drm/radeon: use kobj_to_dev()
  drm/amdgpu: use kobj_to_dev()
  drm/amdgpu/cz: force vce clocks when sclks are forced
  drm/amdgpu/cz: force uvd clocks when sclks are forced
  drm/amdgpu/cz: add code to enable forcing VCE clocks
  drm/amdgpu/cz: add code to enable forcing UVD clocks
  drm/amdgpu: fix lost sync_to if scheduler is enabled.
  drm/amd/powerplay: fix static checker warning for return meaningless value.
  drm/amdgpu: add irq domain support
  drm/amdgpu/cgs: add an interface to access PCI resources
parents 8a0d560f d8e0cae6
@@ -987,6 +987,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
......
@@ -398,6 +398,41 @@ static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
	WARN(ret, "pci_write_config_dword error");
}
static int amdgpu_cgs_get_pci_resource(void *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
uint64_t offset,
uint64_t *resource_base)
{
CGS_FUNC_ADEV;
if (resource_base == NULL)
return -EINVAL;
switch (resource_type) {
case CGS_RESOURCE_TYPE_MMIO:
if (adev->rmmio_size == 0)
return -ENOENT;
if ((offset + size) > adev->rmmio_size)
return -EINVAL;
*resource_base = adev->rmmio_base;
return 0;
case CGS_RESOURCE_TYPE_DOORBELL:
if (adev->doorbell.size == 0)
return -ENOENT;
if ((offset + size) > adev->doorbell.size)
return -EINVAL;
*resource_base = adev->doorbell.base;
return 0;
case CGS_RESOURCE_TYPE_FB:
case CGS_RESOURCE_TYPE_IO:
case CGS_RESOURCE_TYPE_ROM:
default:
return -EINVAL;
}
}
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						   unsigned table, uint16_t *size,
						   uint8_t *frev, uint8_t *crev)
@@ -1041,6 +1076,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
amdgpu_cgs_get_pci_resource,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
......
@@ -421,15 +421,17 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 	amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
-	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
 	if (r)
 		goto error_validate;
-	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
 error_validate:
-	if (r)
+	if (r) {
+		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+	}
 error_reserve:
 	if (need_mmap_lock)
@@ -473,8 +475,11 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	unsigned i;
amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
......
@@ -312,6 +312,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
	}
	adev->irq.sources[src_id] = source;
	return 0;
}
@@ -335,15 +336,19 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 		return;
 	}
-	src = adev->irq.sources[src_id];
-	if (!src) {
-		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
-		return;
-	}
+	if (adev->irq.virq[src_id]) {
+		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
+	} else {
+		src = adev->irq.sources[src_id];
+		if (!src) {
+			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
+			return;
+		}
 		r = src->funcs->process(adev, src, entry);
 		if (r)
 			DRM_ERROR("error processing interrupt (%d)\n", r);
+	}
 }
/**
@@ -461,3 +466,90 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
	return !!atomic_read(&src->enabled_types[type]);
}
/* gen irq */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
/* XXX */
}
static void amdgpu_irq_unmask(struct irq_data *irqd)
{
/* XXX */
}
static struct irq_chip amdgpu_irq_chip = {
.name = "amdgpu-ih",
.irq_mask = amdgpu_irq_mask,
.irq_unmask = amdgpu_irq_unmask,
};
static int amdgpu_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
return -EPERM;
irq_set_chip_and_handler(irq,
&amdgpu_irq_chip, handle_simple_irq);
return 0;
}
static struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
.map = amdgpu_irqdomain_map,
};
/**
* amdgpu_irq_add_domain - create a linear irq domain
*
* @adev: amdgpu device pointer
*
* Create an irq domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
&amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
DRM_ERROR("GPU irq add domain failed\n");
return -ENODEV;
}
return 0;
}
/**
* amdgpu_irq_remove_domain - remove the irq domain
*
* @adev: amdgpu device pointer
*
* Remove the irq domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
*/
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
if (adev->irq.domain) {
irq_domain_remove(adev->irq.domain);
adev->irq.domain = NULL;
}
}
/**
* amdgpu_irq_create_mapping - create a mapping between a domain irq and a
* Linux irq
*
* @adev: amdgpu device pointer
* @src_id: IH source id
*
* Create a mapping between a domain irq (GPU IH src id) and a Linux irq
* Use this for components that generate a GPU interrupt, but are driven
* by a different driver (e.g., ACP).
* Returns the Linux irq.
*/
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
return adev->irq.virq[src_id];
}
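For reference, a component that owns one of these IH source ids (the ACP audio block is the intended consumer mentioned in the kernel-doc) would map its src_id to a Linux irq with amdgpu_irq_create_mapping() and then claim it through the regular kernel irq API. A minimal sketch, not part of this series; the src_id value, handler and device pointers below are placeholders:

	unsigned virq = amdgpu_irq_create_mapping(adev, 0x55); /* 0x55: example IH src_id */
	int r;

	if (!virq)
		return -ENODEV;	/* irq_create_mapping() returned 0, mapping failed */

	/* virq is an ordinary Linux irq number from here on */
	r = devm_request_irq(dev, virq, acp_irq_handler, 0, "amdgpu-acp", acp_data);
	if (r)
		return r;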
@@ -24,6 +24,7 @@
#ifndef __AMDGPU_IRQ_H__
#define __AMDGPU_IRQ_H__
#include <linux/irqdomain.h>
#include "amdgpu_ih.h" #include "amdgpu_ih.h"
#define AMDGPU_MAX_IRQ_SRC_ID 0x100 #define AMDGPU_MAX_IRQ_SRC_ID 0x100
...@@ -65,6 +66,10 @@ struct amdgpu_irq { ...@@ -65,6 +66,10 @@ struct amdgpu_irq {
/* interrupt ring */ /* interrupt ring */
struct amdgpu_ih_ring ih; struct amdgpu_ih_ring ih;
const struct amdgpu_ih_funcs *ih_funcs; const struct amdgpu_ih_funcs *ih_funcs;
/* gen irq stuff */
struct irq_domain *domain; /* GPU irq controller domain */
unsigned virq[AMDGPU_MAX_IRQ_SRC_ID];
};
void amdgpu_irq_preinstall(struct drm_device *dev);
@@ -90,4 +95,8 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
#endif
@@ -326,7 +326,7 @@ static struct attribute *hwmon_attributes[] = {
 static umode_t hwmon_attributes_visible(struct kobject *kobj,
 					struct attribute *attr, int index)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	umode_t effective_mode = attr->mode;
......
@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		fence = to_amdgpu_fence(sync->sync_to[i]);
 		/* check if we really need to sync */
-		if (!amdgpu_fence_need_sync(fence, ring))
+		if (!amdgpu_enable_scheduler &&
+		    !amdgpu_fence_need_sync(fence, ring))
 			continue;
 		/* prevent GPU deadlocks */
@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		}
 		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
-			r = fence_wait(&fence->base, true);
+			r = fence_wait(sync->sync_to[i], true);
 			if (r)
 				return r;
 			continue;
......
@@ -119,6 +119,33 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
		list_add(&entry->tv.head, duplicates);
	}
}
/**
* amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
*
* @adev: amdgpu device instance
* @vm: vm providing the BOs
*
* Move the PT BOs to the tail of the LRU.
*/
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct ttm_bo_global *glob = adev->mman.bdev.glob;
unsigned i;
spin_lock(&glob->lru_lock);
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
if (!entry->robj)
continue;
ttm_bo_move_to_lru_tail(&entry->robj->tbo);
}
spin_unlock(&glob->lru_lock);
}
/**
......
@@ -274,6 +274,11 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev)
static int cik_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
ret = amdgpu_irq_add_domain(adev);
if (ret)
return ret;
	cik_ih_set_interrupt_funcs(adev);
@@ -300,6 +305,7 @@ static int cik_ih_sw_fini(void *handle)
	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);
amdgpu_irq_remove_domain(adev);
	return 0;
}
......
@@ -1078,6 +1078,37 @@ static uint32_t cz_get_eclk_level(struct amdgpu_device *adev,
	return i;
}
static uint32_t cz_get_uvd_level(struct amdgpu_device *adev,
uint32_t clock, uint16_t msg)
{
int i = 0;
struct amdgpu_uvd_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
switch (msg) {
case PPSMC_MSG_SetUvdSoftMin:
case PPSMC_MSG_SetUvdHardMin:
for (i = 0; i < table->count; i++)
if (clock <= table->entries[i].vclk)
break;
if (i == table->count)
i = table->count - 1;
break;
case PPSMC_MSG_SetUvdSoftMax:
case PPSMC_MSG_SetUvdHardMax:
for (i = table->count - 1; i >= 0; i--)
if (clock >= table->entries[i].vclk)
break;
if (i < 0)
i = 0;
break;
default:
break;
}
return i;
}
static int cz_program_bootup_state(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
@@ -1739,6 +1770,200 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
	return 0;
}
static int cz_dpm_uvd_force_highest(struct amdgpu_device *adev)
{
struct cz_power_info *pi = cz_get_pi(adev);
int ret = 0;
if (pi->uvd_dpm.soft_min_clk != pi->uvd_dpm.soft_max_clk) {
pi->uvd_dpm.soft_min_clk =
pi->uvd_dpm.soft_max_clk;
ret = cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetUvdSoftMin,
cz_get_uvd_level(adev,
pi->uvd_dpm.soft_min_clk,
PPSMC_MSG_SetUvdSoftMin));
if (ret)
return ret;
}
return ret;
}
static int cz_dpm_uvd_force_lowest(struct amdgpu_device *adev)
{
struct cz_power_info *pi = cz_get_pi(adev);
int ret = 0;
if (pi->uvd_dpm.soft_max_clk != pi->uvd_dpm.soft_min_clk) {
pi->uvd_dpm.soft_max_clk = pi->uvd_dpm.soft_min_clk;
ret = cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetUvdSoftMax,
cz_get_uvd_level(adev,
pi->uvd_dpm.soft_max_clk,
PPSMC_MSG_SetUvdSoftMax));
if (ret)
return ret;
}
return ret;
}
static uint32_t cz_dpm_get_max_uvd_level(struct amdgpu_device *adev)
{
struct cz_power_info *pi = cz_get_pi(adev);
if (!pi->max_uvd_level) {
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
pi->max_uvd_level = cz_get_argument(adev) + 1;
}
if (pi->max_uvd_level > CZ_MAX_HARDWARE_POWERLEVELS) {
DRM_ERROR("Invalid max uvd level!\n");
return -EINVAL;
}
return pi->max_uvd_level;
}
static int cz_dpm_unforce_uvd_dpm_levels(struct amdgpu_device *adev)
{
struct cz_power_info *pi = cz_get_pi(adev);
struct amdgpu_uvd_clock_voltage_dependency_table *dep_table =
&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
uint32_t level = 0;
int ret = 0;
pi->uvd_dpm.soft_min_clk = dep_table->entries[0].vclk;
level = cz_dpm_get_max_uvd_level(adev) - 1;
if (level < dep_table->count)
pi->uvd_dpm.soft_max_clk = dep_table->entries[level].vclk;
else
pi->uvd_dpm.soft_max_clk =
dep_table->entries[dep_table->count - 1].vclk;
/* get min/max sclk soft value
* notify SMU to execute */
ret = cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetUvdSoftMin,
cz_get_uvd_level(adev,
pi->uvd_dpm.soft_min_clk,
PPSMC_MSG_SetUvdSoftMin));
if (ret)
return ret;
ret = cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetUvdSoftMax,
cz_get_uvd_level(adev,
pi->uvd_dpm.soft_max_clk,
PPSMC_MSG_SetUvdSoftMax));
if (ret)
return ret;
DRM_DEBUG("DPM uvd unforce state min=%d, max=%d.\n",
pi->uvd_dpm.soft_min_clk,
pi->uvd_dpm.soft_max_clk);
return 0;
}
static int cz_dpm_vce_force_highest(struct amdgpu_device *adev)
{
struct cz_power_info *pi = cz_get_pi(adev);
int ret = 0;
if (pi->vce_dpm.soft_min_clk != pi->vce_dpm.soft_max_clk) {
pi->vce_dpm.soft_min_clk =
pi->vce_dpm.soft_max_clk;
ret = cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetEclkSoftMin,
cz_get_eclk_level(adev,
pi->vce_dpm.soft_min_clk,
PPSMC_MSG_SetEclkSoftMin));
if (ret)
return ret;
}
return ret;
}
static int cz_dpm_vce_force_lowest(struct amdgpu_device *adev)
{
struct cz_power_info *pi = cz_get_pi(adev);
int ret = 0;
if (pi->vce_dpm.soft_max_clk != pi->vce_dpm.soft_min_clk) {
pi->vce_dpm.soft_max_clk = pi->vce_dpm.soft_min_clk;
ret = cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetEclkSoftMax,
cz_get_uvd_level(adev,
pi->vce_dpm.soft_max_clk,
PPSMC_MSG_SetEclkSoftMax));
if (ret)
return ret;
}
return ret;
}
static uint32_t cz_dpm_get_max_vce_level(struct amdgpu_device *adev)
{
struct cz_power_info *pi = cz_get_pi(adev);
if (!pi->max_vce_level) {
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
pi->max_vce_level = cz_get_argument(adev) + 1;
}
if (pi->max_vce_level > CZ_MAX_HARDWARE_POWERLEVELS) {
DRM_ERROR("Invalid max vce level!\n");
return -EINVAL;
}
return pi->max_vce_level;
}
static int cz_dpm_unforce_vce_dpm_levels(struct amdgpu_device *adev)
{
struct cz_power_info *pi = cz_get_pi(adev);
struct amdgpu_vce_clock_voltage_dependency_table *dep_table =
&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
uint32_t level = 0;
int ret = 0;
pi->vce_dpm.soft_min_clk = dep_table->entries[0].ecclk;
level = cz_dpm_get_max_vce_level(adev) - 1;
if (level < dep_table->count)
pi->vce_dpm.soft_max_clk = dep_table->entries[level].ecclk;
else
pi->vce_dpm.soft_max_clk =
dep_table->entries[dep_table->count - 1].ecclk;
/* get min/max sclk soft value
* notify SMU to execute */
ret = cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetEclkSoftMin,
cz_get_eclk_level(adev,
pi->vce_dpm.soft_min_clk,
PPSMC_MSG_SetEclkSoftMin));
if (ret)
return ret;
ret = cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetEclkSoftMax,
cz_get_eclk_level(adev,
pi->vce_dpm.soft_max_clk,
PPSMC_MSG_SetEclkSoftMax));
if (ret)
return ret;
DRM_DEBUG("DPM vce unforce state min=%d, max=%d.\n",
pi->vce_dpm.soft_min_clk,
pi->vce_dpm.soft_max_clk);
return 0;
}
static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
				  enum amdgpu_dpm_forced_level level)
{
@@ -1746,23 +1971,68 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
	switch (level) {
	case AMDGPU_DPM_FORCED_LEVEL_HIGH:
/* sclk */
		ret = cz_dpm_unforce_dpm_levels(adev);
		if (ret)
			return ret;
		ret = cz_dpm_force_highest(adev);
if (ret)
return ret;
/* uvd */
ret = cz_dpm_unforce_uvd_dpm_levels(adev);
if (ret)
return ret;
ret = cz_dpm_uvd_force_highest(adev);
if (ret)
return ret;
/* vce */
ret = cz_dpm_unforce_vce_dpm_levels(adev);
if (ret)
return ret;
ret = cz_dpm_vce_force_highest(adev);
		if (ret)
			return ret;
		break;
	case AMDGPU_DPM_FORCED_LEVEL_LOW:
/* sclk */
		ret = cz_dpm_unforce_dpm_levels(adev);
		if (ret)
			return ret;
		ret = cz_dpm_force_lowest(adev);
if (ret)
return ret;
/* uvd */
ret = cz_dpm_unforce_uvd_dpm_levels(adev);
if (ret)
return ret;
ret = cz_dpm_uvd_force_lowest(adev);
if (ret)
return ret;
/* vce */
ret = cz_dpm_unforce_vce_dpm_levels(adev);
if (ret)
return ret;
ret = cz_dpm_vce_force_lowest(adev);
		if (ret)
			return ret;
		break;
	case AMDGPU_DPM_FORCED_LEVEL_AUTO:
/* sclk */
		ret = cz_dpm_unforce_dpm_levels(adev);
if (ret)
return ret;
/* uvd */
ret = cz_dpm_unforce_uvd_dpm_levels(adev);
if (ret)
return ret;
/* vce */
ret = cz_dpm_unforce_vce_dpm_levels(adev);
		if (ret)
			return ret;
		break;
@@ -1905,7 +2175,8 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev)
 		pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
 	} else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
-		pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
+		/* leave it as set by user */
+		/*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/
 	}
 	cz_send_msg_to_smc_with_parameter(adev,
......
@@ -183,6 +183,8 @@ struct cz_power_info {
	uint32_t voltage_drop_threshold;
	uint32_t gfx_pg_threshold;
	uint32_t max_sclk_level;
uint32_t max_uvd_level;
uint32_t max_vce_level;
	/* flags */
	bool didt_enabled;
	bool video_start;
......
@@ -253,8 +253,14 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev)
static int cz_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
ret = amdgpu_irq_add_domain(adev);
if (ret)
return ret;
	cz_ih_set_interrupt_funcs(adev);
	return 0;
}
@@ -278,6 +284,7 @@ static int cz_ih_sw_fini(void *handle)
	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);
amdgpu_irq_remove_domain(adev);
	return 0;
}
......
@@ -253,8 +253,14 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev)
static int iceland_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
ret = amdgpu_irq_add_domain(adev);
if (ret)
return ret;
	iceland_ih_set_interrupt_funcs(adev);
	return 0;
}
@@ -278,6 +284,7 @@ static int iceland_ih_sw_fini(void *handle)
	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);
amdgpu_irq_remove_domain(adev);
	return 0;
}
......
@@ -273,8 +273,14 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev)
static int tonga_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
ret = amdgpu_irq_add_domain(adev);
if (ret)
return ret;
	tonga_ih_set_interrupt_funcs(adev);
	return 0;
}
@@ -301,6 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);
amdgpu_irq_add_domain(adev);
	return 0;
}
......
@@ -122,6 +122,17 @@
	uint64_t padding[13];
};
/*
* enum cgs_resource_type - GPU resource type
*/
enum cgs_resource_type {
CGS_RESOURCE_TYPE_MMIO = 0,
CGS_RESOURCE_TYPE_FB,
CGS_RESOURCE_TYPE_IO,
CGS_RESOURCE_TYPE_DOORBELL,
CGS_RESOURCE_TYPE_ROM,
};
/**
 * struct cgs_clock_limits - Clock limits
 *
@@ -417,6 +428,23 @@ typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
					     uint32_t value);
/**
* cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
* @cgs_device: opaque device handle
* @resource_type: Type of Resource (MMIO, IO, ROM, FB, DOORBELL)
* @size: size of the region
* @offset: offset from the start of the region
* @resource_base: base address (not including offset) returned
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_get_pci_resource_t)(void *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
uint64_t offset,
uint64_t *resource_base);
/**
 * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
 * @cgs_device: opaque device handle
@@ -593,6 +621,8 @@ struct cgs_ops {
	cgs_write_pci_config_byte_t write_pci_config_byte;
	cgs_write_pci_config_word_t write_pci_config_word;
	cgs_write_pci_config_dword_t write_pci_config_dword;
/* PCI resources */
cgs_get_pci_resource_t get_pci_resource;
	/* ATOM BIOS */
	cgs_atom_get_data_table_t atom_get_data_table;
	cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
@@ -708,5 +738,9 @@ struct cgs_device
	CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
#define cgs_query_system_info(dev, sys_info) \
	CGS_CALL(query_system_info, dev, sys_info)
#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \
resource_base) \
CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
resource_base)
#endif /* _CGS_COMMON_H */
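As a usage sketch (not taken from this series), a CGS client such as the ACP block could query a BAR base through the new cgs_get_pci_resource() wrapper; the size and offset values below are purely illustrative:

	uint64_t base = 0;
	int r;

	/* ask for the start of the MMIO aperture; per the amdgpu implementation above
	 * this fails with -ENOENT/-EINVAL if the region is absent or the window does not fit */
	r = cgs_get_pci_resource(cgs_device, CGS_RESOURCE_TYPE_MMIO,
				 0x1000 /* size */, 0 /* offset */, &base);
	if (r)
		return r;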
@@ -199,7 +199,7 @@ static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
 	PP_ASSERT_WITH_CODE(
 		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
 		"Failed to send Previous Message.",
-		return 1);
+		);
 	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
@@ -207,7 +207,7 @@ static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
 	PP_ASSERT_WITH_CODE(
 		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
 		"Failed to send Message.",
-		return 1);
+		);
 	return 0;
 }
@@ -229,7 +229,7 @@ static int tonga_send_msg_to_smc_without_waiting
 	PP_ASSERT_WITH_CODE(
 		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
 		"Failed to send Previous Message.",
-		return 0);
+		);
 	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
 	return 0;
......
@@ -713,7 +713,7 @@ static struct attribute *hwmon_attributes[] = {
 static umode_t hwmon_attributes_visible(struct kobject *kobj,
 					struct attribute *attr, int index)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct radeon_device *rdev = dev_get_drvdata(dev);
 	umode_t effective_mode = attr->mode;
......
@@ -176,7 +176,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		list_add_tail(&bo->lru, &man->lru);
 		kref_get(&bo->list_kref);
-		if (bo->ttm != NULL) {
+		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
 			list_add_tail(&bo->swap, &bo->glob->swap_lru);
 			kref_get(&bo->list_kref);
 		}
@@ -228,6 +228,27 @@ void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
lockdep_assert_held(&bo->resv->lock.base);
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
list_del_init(&bo->swap);
list_del_init(&bo->lru);
} else {
if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
list_move_tail(&bo->swap, &bo->glob->swap_lru);
man = &bdev->man[bo->mem.mem_type];
list_move_tail(&bo->lru, &man->lru);
}
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
/*
 * Call bo->mutex locked.
 */
@@ -1170,9 +1191,15 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	if (likely(!ret))
 		ret = ttm_bo_validate(bo, placement, interruptible, false);
-	if (!resv)
+	if (!resv) {
 		ttm_bo_unreserve(bo);
+	} else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+		spin_lock(&bo->glob->lru_lock);
+		ttm_bo_add_to_lru(bo);
+		spin_unlock(&bo->glob->lru_lock);
+	}
	if (unlikely(ret))
		ttm_bo_unref(&bo);
......
@@ -383,6 +383,16 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
 */
extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
/**
* ttm_bo_move_to_lru_tail
*
* @bo: The buffer object.
*
* Move this BO to the tail of all lru lists used to lookup and reserve an
* object. This function must be called with struct ttm_bo_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction.
*/
extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
/**
 * ttm_bo_lock_delayed_workqueue
......
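The calling convention matches how amdgpu_vm_move_pt_bos_in_lru() above uses the helper: the caller must have the BO reserved and must hold the global LRU lock around the move. A condensed sketch of that pattern, illustrative only:

	struct ttm_bo_global *glob = bo->glob;

	/* bo must already be reserved by the caller */
	spin_lock(&glob->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&glob->lru_lock);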