Commit ae0c7306 authored by Dave Airlie

Merge branch 'drm-fixes-4.18' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Bunch of amdgpu fixes, mostly all going to stable.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180620190021.2775-1-alexander.deucher@amd.com
parents 47fbf82b 7303b39e
@@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
 	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+		return amdgpu_dc > 0;
+	case CHIP_HAWAII:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 	case CHIP_POLARIS10:
...
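For context on the `return amdgpu_dc > 0` line added above: it makes DC strictly opt-in for these older ASICs, while the remaining DC-capable chips in the switch keep the pre-existing opt-out behaviour. A minimal sketch of the assumed semantics follows (it assumes the amdgpu.dc module parameter defaults to -1, i.e. "auto", and that the later cases return `amdgpu_dc != 0`; the helper names are made up for illustration and are not part of the patch):

/* Sketch only; assumes amdgpu.dc semantics of -1 = auto (default),
 * 0 = disable DC, 1 = enable DC. */
static int amdgpu_dc = -1;	/* module parameter */

static bool dc_opt_in_only(void)	/* BONAIRE/KAVERI/KABINI/MULLINS */
{
	/* auto (-1) and 0 both fall back to the non-DC display code */
	return amdgpu_dc > 0;
}

static bool dc_unless_disabled(void)	/* HAWAII and newer DC-capable ASICs */
{
	/* auto (-1) and 1 both select DC; only an explicit 0 disables it */
	return amdgpu_dc != 0;
}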
@@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		adev->vram_pin_size += amdgpu_bo_size(bo);
-		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			adev->invisible_pin_size += amdgpu_bo_size(bo);
+		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 		adev->gart_pin_size += amdgpu_bo_size(bo);
 	}
@@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
-	for (i = 0; i < bo->placement.num_placement; i++) {
-		bo->placements[i].lpfn = 0;
-		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (unlikely(r)) {
-		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-		goto error;
-	}
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
 		adev->vram_pin_size -= amdgpu_bo_size(bo);
-		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			adev->invisible_pin_size -= amdgpu_bo_size(bo);
+		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
 	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
 		adev->gart_pin_size -= amdgpu_bo_size(bo);
 	}
 
-error:
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		bo->placements[i].lpfn = 0;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	}
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (unlikely(r))
+		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
+
 	return r;
 }
...
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
...
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	unsigned version_major, version_minor, family_id;
 	int i, j, r;
 
-	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	void *ptr;
 	int i, j;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
-		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
 		/* only valid for physical mode */
 		if (adev->asic_type < CHIP_POLARIS10) {
 			for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+		container_of(work, struct amdgpu_device, uvd.idle_work.work);
 	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 						       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
...
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
 	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
-	struct delayed_work	idle_work;
 	struct amdgpu_ring	ring;
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
 	bool			address_64_bit;
 	bool			use_ctx_buf;
 	struct amdgpu_uvd_inst	inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct delayed_work	idle_work;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
...
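Taken together, the UVD hunks above move idle handling from one delayed work item per UVD instance to a single per-device item. A condensed sketch of the resulting layout (field subset only, for illustration; other members elided):

struct amdgpu_uvd_inst {
	/* ... per-instance ring, IRQ and handle bookkeeping ... */
	struct amdgpu_ring	ring;
};

struct amdgpu_uvd {
	struct amdgpu_uvd_inst	inst[AMDGPU_MAX_UVD_INSTANCES];
	struct delayed_work	idle_work;	/* now shared by all instances */
};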
@@ -96,6 +96,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 		adev->gmc.visible_vram_size : end) - start;
 }
 
+/**
+ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_mem_reg *mem = &bo->tbo.mem;
+	struct drm_mm_node *nodes = mem->mm_node;
+	unsigned pages = mem->num_pages;
+	u64 usage = 0;
+
+	if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+		return 0;
+
+	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+		return amdgpu_bo_size(bo);
+
+	while (nodes && pages) {
+		usage += nodes->size << PAGE_SHIFT;
+		usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+		pages -= nodes->size;
+		++nodes;
+	}
+
+	return usage;
+}
+
 /**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
@@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
 	}
 
-	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+	nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+			       GFP_KERNEL | __GFP_ZERO);
 	if (!nodes)
 		return -ENOMEM;
@@ -190,7 +223,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 		drm_mm_remove_node(&nodes[i]);
 	spin_unlock(&mgr->lock);
-	kfree(nodes);
+	kvfree(nodes);
 
 	return r == -ENOSPC ? 0 : r;
 }
@@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
 	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
-	kfree(mem->mm_node);
+	kvfree(mem->mm_node);
 	mem->mm_node = NULL;
 }
...
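A brief aside on the kcalloc/kfree to kvmalloc_array/kvfree switch above: num_nodes grows with the size of the buffer object, so a very large VRAM BO can need a node array too big for kmalloc to satisfy reliably. The sketch below is illustrative only (the helper names are made up, not from the patch) and shows the assumed pattern: kvmalloc_array() tries kmalloc first and falls back to vmalloc for large requests, and memory obtained that way must be released with kvfree(), never plain kfree().

#include <linux/mm.h>	/* kvmalloc_array(), kvfree() */
#include <drm/drm_mm.h>	/* struct drm_mm_node */

/* Hypothetical helper, for illustration only. */
static int alloc_node_array(size_t num_nodes, struct drm_mm_node **out)
{
	struct drm_mm_node *nodes;

	/* May come from kmalloc or, for large num_nodes, from vmalloc. */
	nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes)
		return -ENOMEM;

	*out = nodes;
	return 0;
}

/* Hypothetical counterpart: must use kvfree() since the memory may be
 * vmalloc-backed. */
static void free_node_array(struct drm_mm_node *nodes)
{
	kvfree(nodes);
}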
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
-	int result;
+	int result = 0;
 	uint32_t num_se = 0;
 	uint32_t count, data;
...