Commit 1420f63b authored by Dave Airlie

Merge branch 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux into drm-next

Just some bug fixes and vega10 updates for 4.12.

* 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: fix fence memory leak in wait_all_fence V2
  drm/amdgpu: fix "fix 64bit division"
  drm/amd/powerplay: add fan controller table v11 support.
  drm/amd/powerplay: port newest process pptable code for vega10.
  drm/amdgpu: set vm size and block size by individual gmc by default (v3)
  drm/amdgpu: Avoid overflows/divide-by-zero in latency_watermark calculations.
  drm/amdgpu: Make display watermark calculations more accurate
  drm/radeon: fix typo in bandwidth calculation
  drm/radeon: Refuse to migrate a prime BO to VRAM. (v2)
  drm/radeon: Maintain prime import/export refcount for BOs
  drm/amdgpu: Refuse to pin or change acceptable domains of prime BOs to VRAM. (v2)
  drm/amdgpu: Fail fb creation from imported dma-bufs. (v2)
  drm/radeon: Fail fb creation from imported dma-bufs.
parents 2b2fc72a 32df87df
......@@ -1242,6 +1242,7 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
if (r < 0)
return r;
......
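The leak fixed above: amdgpu_cs_get_fence() hands back a fence whose reference the caller owns, and dma_fence_wait_timeout() does not consume it. A condensed sketch of the corrected wait-all loop, simplified from the surrounding function with error paths trimmed:

/* Every fence returned by amdgpu_cs_get_fence() carries a reference
 * owned by this loop, so it must be dropped once the wait finishes,
 * on the success, timeout and error paths alike. */
for (i = 0; i < fence_count; i++) {
	struct dma_fence *fence;

	fence = amdgpu_cs_get_fence(adev, filp, &fences_user[i]);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	if (!fence)
		continue;

	r = dma_fence_wait_timeout(fence, true, timeout);
	dma_fence_put(fence);	/* the V2 fix: drop our reference */
	if (r < 0)
		return r;
	if (r == 0)
		break;		/* timed out */
}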
......@@ -1040,35 +1040,31 @@ static bool amdgpu_check_pot_argument(int arg)
return (arg & (arg - 1)) == 0;
}
- static void amdgpu_get_block_size(struct amdgpu_device *adev)
+ static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
- if (amdgpu_vm_block_size == -1) {
- /* Total bits covered by PD + PTs */
- unsigned bits = ilog2(amdgpu_vm_size) + 18;
- /* Make sure the PD is 4K in size up to 8GB address space.
- Above that split equal between PD and PTs */
- if (amdgpu_vm_size <= 8)
- amdgpu_vm_block_size = bits - 9;
- else
- amdgpu_vm_block_size = (bits + 3) / 2;
+ if (amdgpu_vm_block_size == -1)
+ return;
- } else if (amdgpu_vm_block_size < 9) {
+ if (amdgpu_vm_block_size < 9) {
dev_warn(adev->dev, "VM page table size (%d) too small\n",
amdgpu_vm_block_size);
- amdgpu_vm_block_size = 9;
+ goto def_value;
}
if (amdgpu_vm_block_size > 24 ||
(amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
dev_warn(adev->dev, "VM page table size (%d) too large\n",
amdgpu_vm_block_size);
- amdgpu_vm_block_size = 9;
+ goto def_value;
}
+ return;
+ def_value:
+ amdgpu_vm_block_size = -1;
}
static void amdgpu_check_vm_size(struct amdgpu_device *adev)
......@@ -1097,8 +1093,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
return;
def_value:
- amdgpu_vm_size = 8;
- dev_info(adev->dev, "set default VM size %dGB\n", amdgpu_vm_size);
+ amdgpu_vm_size = -1;
}
/**
......@@ -1132,7 +1127,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_check_vm_size(adev);
- amdgpu_get_block_size(adev);
+ amdgpu_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
......
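Worked example of the validation above: with the default 64 GB VM, amdgpu_vm_size * 1024 is 65536 = 2^16, so any block size from 9 through 16 passes both checks (the hard cap of 24 only binds for much larger VMs). Anything outside that range no longer silently becomes 9; it falls back to -1, meaning "let the GMC code pick via amdgpu_vm_adjust_size()".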
......@@ -614,6 +614,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
......
......@@ -86,7 +86,7 @@ int amdgpu_runtime_pm = -1;
unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
- int amdgpu_vm_size = 64;
+ int amdgpu_vm_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
......
......@@ -729,6 +729,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
+ if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
+ r = -EINVAL;
+ amdgpu_bo_unreserve(robj);
+ break;
+ }
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
amdgpu_bo_unreserve(robj);
......
......@@ -650,6 +650,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
+ /* A shared bo cannot be migrated to VRAM */
+ if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
+ return -EINVAL;
if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
......
......@@ -554,6 +554,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
uint64_t offset = page_offset;
page_offset = do_div(offset, size);
+ mm += offset;
return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
}
......
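For reference, do_div(n, base) is the kernel macro that divides the 64-bit n in place and returns the remainder, which is what makes the one-line fix sufficient:

/* After these two lines from the hunk above:
 *   offset      = page_offset / size   (which drm_mm_node to use)
 *   page_offset = page_offset % size   (page index inside that node)
 * The added 'mm += offset;' advances to that node; without it the
 * PFN was always computed from the first VRAM node's start. */
uint64_t offset = page_offset;
page_offset = do_div(offset, size);
mm += offset;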
......@@ -2064,6 +2064,44 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
}
+ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
+ {
+ /* Total bits covered by PD + PTs */
+ unsigned bits = ilog2(vm_size) + 18;
+ /* Make sure the PD is 4K in size up to 8GB address space.
+ Above that split equal between PD and PTs */
+ if (vm_size <= 8)
+ return (bits - 9);
+ else
+ return ((bits + 3) / 2);
+ }
+ /**
+ * amdgpu_vm_adjust_size - adjust vm size and block size
+ *
+ * @adev: amdgpu_device pointer
+ * @vm_size: the default vm size if it's set auto
+ */
+ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+ {
+ /* adjust vm size firstly */
+ if (amdgpu_vm_size == -1)
+ adev->vm_manager.vm_size = vm_size;
+ else
+ adev->vm_manager.vm_size = amdgpu_vm_size;
+ /* block size depends on vm size */
+ if (amdgpu_vm_block_size == -1)
+ adev->vm_manager.block_size =
+ amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
+ else
+ adev->vm_manager.block_size = amdgpu_vm_block_size;
+ DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+ adev->vm_manager.vm_size, adev->vm_manager.block_size);
+ }
/**
* amdgpu_vm_init - initialize a vm instance
*
......
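Plugging in the defaults: with both module parameters at -1, amdgpu_vm_adjust_size(adev, 64) selects a 64 GB VM, so bits = ilog2(64) + 18 = 24 and, since 64 > 8, block_size = (24 + 3) / 2 = 13. A throwaway userspace restatement to check the arithmetic (ilog2() open-coded here, since this is not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t ilog2_u64(uint64_t v)
{
	uint32_t r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirrors amdgpu_vm_get_block_size() from the hunk above. */
static uint32_t vm_get_block_size(uint64_t vm_size_gb)
{
	unsigned int bits = ilog2_u64(vm_size_gb) + 18; /* PD + PT bits */

	return vm_size_gb <= 8 ? bits - 9 : (bits + 3) / 2;
}

int main(void)
{
	printf("64 GB -> %u-bit\n", vm_get_block_size(64)); /* 13 */
	printf("8 GB  -> %u-bit\n", vm_get_block_size(8));  /* 12 */
	return 0;
}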
......@@ -234,5 +234,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
+ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
#endif
......@@ -1090,23 +1090,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
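Algebraically both versions compute tmp = dmif_size * disp_clk / (mc_latency + 512); the difference is that the old code went through 20.12 fixed-point intermediates, where (mc_latency + 512) / disp_clk can round to zero in 12 fractional bits (a later divide-by-zero) and the following quotient can overflow the 20 integer bits, while div_u64() stays in 64-bit integer math throughout. The same rewrite is repeated in the dce_v11_0, dce_v6_0 and dce_v8_0 hunks below. A userspace illustration with assumed magnitudes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Values are assumptions, chosen only for realistic scale. */
	uint64_t dmif_size  = 12288;	/* bytes */
	uint64_t disp_clk   = 600000;	/* kHz   */
	uint64_t mc_latency = 40000;	/* ns    */

	/* The product alone (~7.4e9) already needs 64 bits. */
	uint64_t tmp = dmif_size * disp_clk / (mc_latency + 512);

	printf("tmp = %llu\n", (unsigned long long)tmp); /* ~181990 */
	return 0;
}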
......@@ -1214,14 +1201,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
......@@ -1236,7 +1223,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
......@@ -1275,7 +1262,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
......
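The pixel_period change above (repeated in the dce_v11_0, dce_v6_0 and dce_v8_0 hunks below) fixes a truncation error: 1000000 / mode->clock rounds down to whole units before being multiplied back up. Worked example for 1920x1080@60 (mode->clock = 148500): the old code gets pixel_period = 1000000 / 148500 = 6, hence active_time = 1920 * 6 = 11520, while the new expression gives 1000000 * 1920 / 148500 = 12929, about 11% more, which feeds directly into blank_time and the programmed watermarks.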
......@@ -1059,23 +1059,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
......@@ -1183,14 +1170,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
......@@ -1205,7 +1192,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
......@@ -1244,7 +1231,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
......
......@@ -861,23 +861,10 @@ static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
......@@ -986,7 +973,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce6_wm_params wm_low, wm_high;
u32 dram_channels;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
......@@ -996,8 +983,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
......@@ -1016,7 +1003,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
......@@ -1043,7 +1030,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
......
......@@ -974,23 +974,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
......@@ -1098,14 +1085,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce8_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
......@@ -1120,7 +1107,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
......@@ -1159,7 +1146,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
......
......@@ -849,13 +849,9 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- adev->vm_manager.vm_size = amdgpu_vm_size;
- adev->vm_manager.block_size = amdgpu_vm_block_size;
+ amdgpu_vm_adjust_size(adev, 64);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
- DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
- adev->vm_manager.vm_size, adev->vm_manager.block_size);
adev->mc.mc_mask = 0xffffffffffULL;
adev->need_dma32 = false;
......
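Note that amdgpu_vm_adjust_size() only fills in vm_size and block_size; each GMC still derives max_pfn itself. vm_size is in GB and pages are 4 KB, so one GB is 2^30 / 2^12 = 2^18 pages, hence the surviving vm_size << 18: the 64 GB default yields 64 << 18 = 16777216 page frames.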
......@@ -1003,13 +1003,9 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- adev->vm_manager.vm_size = amdgpu_vm_size;
- adev->vm_manager.block_size = amdgpu_vm_block_size;
+ amdgpu_vm_adjust_size(adev, 64);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
- DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
- adev->vm_manager.vm_size, adev->vm_manager.block_size);
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
......
......@@ -1087,13 +1087,9 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- adev->vm_manager.vm_size = amdgpu_vm_size;
- adev->vm_manager.block_size = amdgpu_vm_block_size;
+ amdgpu_vm_adjust_size(adev, 64);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
- DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
- adev->vm_manager.vm_size, adev->vm_manager.block_size);
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
......
......@@ -520,6 +520,11 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ /* TODO: fix num_level for APU when updating vm size and block size */
+ if (adev->flags & AMD_IS_APU)
+ adev->vm_manager.num_level = 1;
+ else
+ adev->vm_manager.num_level = 3;
amdgpu_vm_manager_init(adev);
......@@ -552,8 +557,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- adev->vm_manager.vm_size = amdgpu_vm_size;
- adev->vm_manager.block_size = amdgpu_vm_block_size;
+ amdgpu_vm_adjust_size(adev, 64);
} else {
/* XXX Don't know how to get VRAM type yet. */
adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
......@@ -564,11 +568,11 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->vm_manager.vm_size = 1U << 18;
adev->vm_manager.block_size = 9;
- DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
- adev->vm_manager.vm_size,
- adev->vm_manager.block_size);
}
+ DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
+ adev->vm_manager.vm_size, adev->vm_manager.block_size);
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
&adev->mc.vm_fault);
......
......@@ -250,6 +250,29 @@ typedef struct _ATOM_Vega10_Fan_Table {
USHORT usFanStartTemperature;
} ATOM_Vega10_Fan_Table;
+ typedef struct _ATOM_Vega10_Fan_Table_V2 {
+ UCHAR ucRevId;
+ USHORT usFanOutputSensitivity;
+ USHORT usFanAcousticLimitRpm;
+ USHORT usThrottlingRPM;
+ USHORT usTargetTemperature;
+ USHORT usMinimumPWMLimit;
+ USHORT usTargetGfxClk;
+ USHORT usFanGainEdge;
+ USHORT usFanGainHotspot;
+ USHORT usFanGainLiquid;
+ USHORT usFanGainVrVddc;
+ USHORT usFanGainVrMvdd;
+ USHORT usFanGainPlx;
+ USHORT usFanGainHbm;
+ UCHAR ucEnableZeroRPM;
+ USHORT usFanStopTemperature;
+ USHORT usFanStartTemperature;
+ UCHAR ucFanParameters;
+ UCHAR ucFanMinRPM;
+ UCHAR ucFanMaxRPM;
+ } ATOM_Vega10_Fan_Table_V2;
typedef struct _ATOM_Vega10_Thermal_Controller {
UCHAR ucRevId;
UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/
......@@ -305,6 +328,33 @@ typedef struct _ATOM_Vega10_PowerTune_Table {
USHORT usTemperatureLimitTedge;
} ATOM_Vega10_PowerTune_Table;
+ typedef struct _ATOM_Vega10_PowerTune_Table_V2
+ {
+ UCHAR ucRevId;
+ USHORT usSocketPowerLimit;
+ USHORT usBatteryPowerLimit;
+ USHORT usSmallPowerLimit;
+ USHORT usTdcLimit;
+ USHORT usEdcLimit;
+ USHORT usSoftwareShutdownTemp;
+ USHORT usTemperatureLimitHotSpot;
+ USHORT usTemperatureLimitLiquid1;
+ USHORT usTemperatureLimitLiquid2;
+ USHORT usTemperatureLimitHBM;
+ USHORT usTemperatureLimitVrSoc;
+ USHORT usTemperatureLimitVrMem;
+ USHORT usTemperatureLimitPlx;
+ USHORT usLoadLineResistance;
+ UCHAR ucLiquid1_I2C_address;
+ UCHAR ucLiquid2_I2C_address;
+ UCHAR ucLiquid_I2C_Line;
+ UCHAR ucVr_I2C_address;
+ UCHAR ucVr_I2C_Line;
+ UCHAR ucPlx_I2C_address;
+ UCHAR ucPlx_I2C_Line;
+ USHORT usTemperatureLimitTedge;
+ } ATOM_Vega10_PowerTune_Table_V2;
typedef struct _ATOM_Vega10_Hard_Limit_Record {
ULONG ulSOCCLKLimit;
ULONG ulGFXCLKLimit;
......
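Both V2 structs keep ucRevId as their first member, which is how the ported process-pptable code can tell the layouts apart at parse time. A hedged sketch of that dispatch; the helper names and the concrete revision values are assumptions (the commit subject only says the fan table is "v11"), not code from this merge:

/* Hypothetical revision dispatch; vega10_processpptable.c follows this
 * shape with its own helper names. */
static int init_fan_table(struct pp_hwmgr *hwmgr, const void *table)
{
	const ATOM_Vega10_Fan_Table *v1 = table;

	switch (v1->ucRevId) {
	case 10:	/* assumed id of the original layout */
		return parse_fan_table_v1(hwmgr, v1);
	case 11:	/* assumed id of the V2 layout above */
		return parse_fan_table_v2(hwmgr,
				(const ATOM_Vega10_Fan_Table_V2 *)table);
	default:
		return -EINVAL;
	}
}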
......@@ -26,6 +26,34 @@
#include "hwmgr.h"
+ enum Vega10_I2CLineID {
+ Vega10_I2CLineID_DDC1 = 0x90,
+ Vega10_I2CLineID_DDC2 = 0x91,
+ Vega10_I2CLineID_DDC3 = 0x92,
+ Vega10_I2CLineID_DDC4 = 0x93,
+ Vega10_I2CLineID_DDC5 = 0x94,
+ Vega10_I2CLineID_DDC6 = 0x95,
+ Vega10_I2CLineID_SCLSDA = 0x96,
+ Vega10_I2CLineID_DDCVGA = 0x97
+ };
+ #define Vega10_I2C_DDC1DATA 0
+ #define Vega10_I2C_DDC1CLK 1
+ #define Vega10_I2C_DDC2DATA 2
+ #define Vega10_I2C_DDC2CLK 3
+ #define Vega10_I2C_DDC3DATA 4
+ #define Vega10_I2C_DDC3CLK 5
+ #define Vega10_I2C_SDA 40
+ #define Vega10_I2C_SCL 41
+ #define Vega10_I2C_DDC4DATA 65
+ #define Vega10_I2C_DDC4CLK 66
+ #define Vega10_I2C_DDC5DATA 0x48
+ #define Vega10_I2C_DDC5CLK 0x49
+ #define Vega10_I2C_DDC6DATA 0x4a
+ #define Vega10_I2C_DDC6CLK 0x4b
+ #define Vega10_I2C_DDCVGADATA 0x4c
+ #define Vega10_I2C_DDCVGACLK 0x4d
extern const struct pp_table_func vega10_pptable_funcs;
extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
......
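The enum values and pin defines pair up: each DDC line ID maps to a DATA/CLK pin number that the thermal-controller setup can program. A sketch of that mapping (the helper itself is hypothetical; only the IDs and pins come from the header above):

/* Hypothetical lookup from an ATOM I2C line ID to its pin pair. */
static int vega10_i2c_line_to_pins(uint8_t line, uint8_t *data, uint8_t *clk)
{
	switch (line) {
	case Vega10_I2CLineID_DDC1:
		*data = Vega10_I2C_DDC1DATA;
		*clk = Vega10_I2C_DDC1CLK;
		return 0;
	case Vega10_I2CLineID_SCLSDA:
		*data = Vega10_I2C_SDA;
		*clk = Vega10_I2C_SCL;
		return 0;
	/* DDC2..DDC6 and DDCVGA follow the same pattern. */
	default:
		return -EINVAL;
	}
}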
......@@ -3295,7 +3295,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mem_trp = ((temp >> 8) & 0x7) + 1;
mem_tras = ((temp >> 11) & 0xf) + 4;
} else if (rdev->family == CHIP_RV350 ||
- rdev->family <= CHIP_RV380) {
+ rdev->family == CHIP_RV380) {
/* rv3x0 */
mem_trcd = (temp & 0x7) + 3;
mem_trp = ((temp >> 8) & 0x7) + 3;
......
......@@ -499,6 +499,7 @@ struct radeon_bo {
u32 tiling_flags;
u32 pitch;
int surface_reg;
+ unsigned prime_shared_count;
/* list of all virtual address to which this bo
* is associated to
*/
......
......@@ -164,6 +164,16 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].allowed_domains = domain;
}
+ /* Objects shared as dma-bufs cannot be moved to VRAM */
+ if (p->relocs[i].robj->prime_shared_count) {
+ p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
+ if (!p->relocs[i].allowed_domains) {
+ DRM_ERROR("BO associated with dma-buf cannot "
+ "be moved to VRAM\n");
+ return -EINVAL;
+ }
+ }
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
......
......@@ -1354,6 +1354,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
......
......@@ -120,6 +120,10 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
return r;
}
}
+ if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
+ /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
+ return -EINVAL;
+ }
return 0;
}
......
......@@ -352,6 +352,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
return 0;
}
+ if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
+ /* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
+ return -EINVAL;
+ }
radeon_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
......
......@@ -77,6 +77,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
+ bo->prime_shared_count = 1;
return &bo->gem_base;
}
......@@ -91,6 +92,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
radeon_bo_unreserve(bo);
return ret;
}
......@@ -105,6 +109,8 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
return;
radeon_bo_unpin(bo);
+ if (bo->prime_shared_count)
+ bo->prime_shared_count--;
radeon_bo_unreserve(bo);
}
......
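Taken together, the radeon prime changes give prime_shared_count a small lifecycle that the VRAM checks in the CS, GEM and pinning paths above all key off:

/* Lifecycle of bo->prime_shared_count, per the hunks above:
 *
 *   import (radeon_gem_prime_import_sg_table):  count = 1
 *   export pin (radeon_gem_prime_pin):          count++ on success
 *   export unpin (radeon_gem_prime_unpin):      count-- if nonzero
 *
 * A nonzero count means another device may hold an sg_table built from
 * the BO's current backing pages, so migrating it to VRAM would leave
 * the importer with dangling mappings. A hypothetical predicate
 * summarizing the checks added above: */
static bool radeon_bo_vram_migration_ok(struct radeon_bo *bo)
{
	return bo->prime_shared_count == 0;
}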