Commit 01d07351 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "I really need to get back to sending these on my Friday, instead of my
  Monday morning, but nothing too amazing in here: a few amdkfd fixes, a
  few radeon fixes, i915 fixes, one tegra fix and one core fix"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm: Zero out invalid vblank timestamp in drm_update_vblank_count.
  drm/tegra: Don't use vblank_disable_immediate on incapable driver.
  drm/radeon: stop trying to suspend UVD sessions
  drm/radeon: more strictly validate the UVD codec
  drm/radeon: make UVD handle checking more strict
  drm/radeon: make VCE handle check more strict
  drm/radeon: fix userptr lockup
  drm/radeon: fix userptr BO unpin bug v3
  drm/amdkfd: Initialize sdma vm when creating sdma queue
  drm/amdkfd: Don't report local memory size
  drm/amdkfd: allow unregister process with queues
  drm/i915: Drop PIPE-A quirk for 945GSE HP Mini
  drm/i915: Sink rate read should be saved in deca-kHz
  drm/i915/dp: there is no audio on port A
  drm/i915: Add missing MacBook Pro models with dual channel LVDS
  drm/i915: Assume dual channel LVDS if pixel clock necessitates it
  drm/radeon: don't setup audio on asics that don't support it
  drm/radeon: disable semaphores for UVD V1 (v2)
parents 41f2a93c 332545b3
...@@ -430,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm, ...@@ -430,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
BUG_ON(!dqm || !qpd); BUG_ON(!dqm || !qpd);
BUG_ON(!list_empty(&qpd->queues_list)); pr_debug("In func %s\n", __func__);
pr_debug("kfd: In func %s\n", __func__); pr_debug("qpd->queues_list is %s\n",
list_empty(&qpd->queues_list) ? "empty" : "not empty");
retval = 0; retval = 0;
mutex_lock(&dqm->lock); mutex_lock(&dqm->lock);
...@@ -882,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, ...@@ -882,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
return -ENOMEM; return -ENOMEM;
} }
init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties); &q->gart_mqd_addr, &q->properties);
if (retval != 0) if (retval != 0)
......
...@@ -728,9 +728,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, ...@@ -728,9 +728,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute", sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz( dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
dev->gpu->kgd)); dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size", sysfs_show_64bit_prop(buffer, "local_mem_size",
dev->gpu->kfd2kgd->get_vmem_size( (unsigned long long int) 0);
dev->gpu->kgd));
sysfs_show_32bit_prop(buffer, "fw_version", sysfs_show_32bit_prop(buffer, "fw_version",
dev->gpu->kfd2kgd->get_fw_version( dev->gpu->kfd2kgd->get_fw_version(
......
...@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) ...@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
/* Reinitialize corresponding vblank timestamp if high-precision query /* Reinitialize corresponding vblank timestamp if high-precision query
* available. Skip this step if query unsupported or failed. Will * available. Skip this step if query unsupported or failed. Will
* reinitialize delayed at next vblank interrupt in that case. * reinitialize delayed at next vblank interrupt in that case and
* assign 0 for now, to mark the vblanktimestamp as invalid.
*/ */
if (rc) { tslot = atomic_read(&vblank->count) + diff;
tslot = atomic_read(&vblank->count) + diff; vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
smp_mb__before_atomic(); smp_mb__before_atomic();
atomic_add(diff, &vblank->count); atomic_add(diff, &vblank->count);
......
...@@ -13635,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = { ...@@ -13635,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
}; };
static struct intel_quirk intel_quirks[] = { static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */ /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force }, { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
......
...@@ -1348,7 +1348,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, ...@@ -1348,7 +1348,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_dp_encoder = true; pipe_config->has_dp_encoder = true;
pipe_config->has_drrs = false; pipe_config->has_drrs = false;
pipe_config->has_audio = intel_dp->has_audio; pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode, intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
...@@ -2211,8 +2211,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, ...@@ -2211,8 +2211,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
int dotclock; int dotclock;
tmp = I915_READ(intel_dp->output_reg); tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_AUDIO_OUTPUT_ENABLE)
pipe_config->has_audio = true; pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
if (tmp & DP_SYNC_HS_HIGH) if (tmp & DP_SYNC_HS_HIGH)
...@@ -3812,7 +3812,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) ...@@ -3812,7 +3812,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (val == 0) if (val == 0)
break; break;
intel_dp->sink_rates[i] = val * 200; /* Value read is in kHz while drm clock is saved in deca-kHz */
intel_dp->sink_rates[i] = (val * 200) / 10;
} }
intel_dp->num_sink_rates = i; intel_dp->num_sink_rates = i;
} }
......
...@@ -813,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) ...@@ -813,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
static const struct dmi_system_id intel_dual_link_lvds[] = { static const struct dmi_system_id intel_dual_link_lvds[] = {
{ {
.callback = intel_dual_link_lvds_callback, .callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro (Core i5/i7 Series)", .ident = "Apple MacBook Pro 15\" (2010)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
},
},
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro 15\" (2011)",
.matches = { .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
}, },
}, },
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro 15\" (2012)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
},
},
{ } /* terminating entry */ { } /* terminating entry */
}; };
...@@ -848,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) ...@@ -848,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
if (i915.lvds_channel_mode > 0) if (i915.lvds_channel_mode > 0)
return i915.lvds_channel_mode == 2; return i915.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
> 112999)
return true;
if (dmi_check_system(intel_dual_link_lvds)) if (dmi_check_system(intel_dual_link_lvds))
return true; return true;
...@@ -1111,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev) ...@@ -1111,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev)
out: out:
mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&dev->mode_config.mutex);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n", DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single"); lvds_encoder->is_dual_link ? "dual" : "single");
...@@ -1125,7 +1148,6 @@ void intel_lvds_init(struct drm_device *dev) ...@@ -1125,7 +1148,6 @@ void intel_lvds_init(struct drm_device *dev)
} }
drm_connector_register(connector); drm_connector_register(connector);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector, INVALID_PIPE); intel_panel_setup_backlight(connector, INVALID_PIPE);
return; return;
......
...@@ -1673,7 +1673,6 @@ struct radeon_uvd { ...@@ -1673,7 +1673,6 @@ struct radeon_uvd {
struct radeon_bo *vcpu_bo; struct radeon_bo *vcpu_bo;
void *cpu_addr; void *cpu_addr;
uint64_t gpu_addr; uint64_t gpu_addr;
void *saved_bo;
atomic_t handles[RADEON_MAX_UVD_HANDLES]; atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
unsigned img_size[RADEON_MAX_UVD_HANDLES]; unsigned img_size[RADEON_MAX_UVD_HANDLES];
......
...@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = { ...@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = {
static struct radeon_asic_ring rv770_uvd_ring = { static struct radeon_asic_ring rv770_uvd_ring = {
.ib_execute = &uvd_v1_0_ib_execute, .ib_execute = &uvd_v1_0_ib_execute,
.emit_fence = &uvd_v2_2_fence_emit, .emit_fence = &uvd_v2_2_fence_emit,
.emit_semaphore = &uvd_v1_0_semaphore_emit, .emit_semaphore = &uvd_v2_2_semaphore_emit,
.cs_parse = &radeon_uvd_cs_parse, .cs_parse = &radeon_uvd_cs_parse,
.ring_test = &uvd_v1_0_ring_test, .ring_test = &uvd_v1_0_ring_test,
.ib_test = &uvd_v1_0_ib_test, .ib_test = &uvd_v1_0_ib_test,
......
...@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); ...@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int uvd_v2_2_resume(struct radeon_device *rdev); int uvd_v2_2_resume(struct radeon_device *rdev);
void uvd_v2_2_fence_emit(struct radeon_device *rdev, void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence); struct radeon_fence *fence);
bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
/* uvd v3.1 */ /* uvd v3.1 */
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
......
...@@ -464,6 +464,10 @@ void radeon_audio_detect(struct drm_connector *connector, ...@@ -464,6 +464,10 @@ void radeon_audio_detect(struct drm_connector *connector,
return; return;
rdev = connector->encoder->dev->dev_private; rdev = connector->encoder->dev->dev_private;
if (!radeon_audio_chipset_supported(rdev))
return;
radeon_encoder = to_radeon_encoder(connector->encoder); radeon_encoder = to_radeon_encoder(connector->encoder);
dig = radeon_encoder->enc_priv; dig = radeon_encoder->enc_priv;
......
...@@ -142,6 +142,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, ...@@ -142,6 +142,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
list_for_each_entry(bo, &node->bos, mn_list) { list_for_each_entry(bo, &node->bos, mn_list) {
if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
continue;
r = radeon_bo_reserve(bo, true); r = radeon_bo_reserve(bo, true);
if (r) { if (r) {
DRM_ERROR("(%ld) failed to reserve user bo\n", r); DRM_ERROR("(%ld) failed to reserve user bo\n", r);
......
...@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) ...@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{ {
struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
struct radeon_ttm_tt *gtt = (void *)ttm; struct radeon_ttm_tt *gtt = (void *)ttm;
struct scatterlist *sg; struct sg_page_iter sg_iter;
int i;
int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ? enum dma_data_direction direction = write ?
...@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) ...@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */ /* free the sg table and pages again */
dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
struct page *page = sg_page(sg); struct page *page = sg_page_iter_page(&sg_iter);
if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
set_page_dirty(page); set_page_dirty(page);
......
...@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev) ...@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev)
int radeon_uvd_suspend(struct radeon_device *rdev) int radeon_uvd_suspend(struct radeon_device *rdev)
{ {
unsigned size; int i, r;
void *ptr;
int i;
if (rdev->uvd.vcpu_bo == NULL) if (rdev->uvd.vcpu_bo == NULL)
return 0; return 0;
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (atomic_read(&rdev->uvd.handles[i])) uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
break; if (handle != 0) {
struct radeon_fence *fence;
if (i == RADEON_MAX_UVD_HANDLES) radeon_uvd_note_usage(rdev);
return 0;
size = radeon_bo_size(rdev->uvd.vcpu_bo); r = radeon_uvd_get_destroy_msg(rdev,
size -= rdev->uvd_fw->size; R600_RING_TYPE_UVD_INDEX, handle, &fence);
if (r) {
DRM_ERROR("Error destroying UVD (%d)!\n", r);
continue;
}
ptr = rdev->uvd.cpu_addr; radeon_fence_wait(fence, false);
ptr += rdev->uvd_fw->size; radeon_fence_unref(&fence);
rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); rdev->uvd.filp[i] = NULL;
memcpy(rdev->uvd.saved_bo, ptr, size); atomic_set(&rdev->uvd.handles[i], 0);
}
}
return 0; return 0;
} }
...@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev) ...@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
ptr = rdev->uvd.cpu_addr; ptr = rdev->uvd.cpu_addr;
ptr += rdev->uvd_fw->size; ptr += rdev->uvd_fw->size;
if (rdev->uvd.saved_bo != NULL) { memset(ptr, 0, size);
memcpy(ptr, rdev->uvd.saved_bo, size);
kfree(rdev->uvd.saved_bo);
rdev->uvd.saved_bo = NULL;
} else
memset(ptr, 0, size);
return 0; return 0;
} }
...@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) ...@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
return 0; return 0;
} }
static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
unsigned stream_type)
{
switch (stream_type) {
case 0: /* H264 */
case 1: /* VC1 */
/* always supported */
return 0;
case 3: /* MPEG2 */
case 4: /* MPEG4 */
/* only since UVD 3 */
if (p->rdev->family >= CHIP_PALM)
return 0;
/* fall through */
default:
DRM_ERROR("UVD codec not supported by hardware %d!\n",
stream_type);
return -EINVAL;
}
}
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
unsigned offset, unsigned buf_sizes[]) unsigned offset, unsigned buf_sizes[])
{ {
...@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, ...@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL; return -EINVAL;
} }
if (msg_type == 1) { switch (msg_type) {
/* it's a decode msg, calc buffer sizes */ case 0:
r = radeon_uvd_cs_msg_decode(msg, buf_sizes); /* it's a create msg, calc image size (width * height) */
/* calc image size (width * height) */ img_size = msg[7] * msg[8];
img_size = msg[6] * msg[7];
r = radeon_uvd_validate_codec(p, msg[4]);
radeon_bo_kunmap(bo); radeon_bo_kunmap(bo);
if (r) if (r)
return r; return r;
} else if (msg_type == 2) { /* try to alloc a new handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle);
return -EINVAL;
}
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p->rdev->uvd.filp[i] = p->filp;
p->rdev->uvd.img_size[i] = img_size;
return 0;
}
}
DRM_ERROR("No more free UVD handles!\n");
return -EINVAL;
case 1:
/* it's a decode msg, validate codec and calc buffer sizes */
r = radeon_uvd_validate_codec(p, msg[4]);
if (!r)
r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
radeon_bo_kunmap(bo);
if (r)
return r;
/* validate the handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
if (p->rdev->uvd.filp[i] != p->filp) {
DRM_ERROR("UVD handle collision detected!\n");
return -EINVAL;
}
return 0;
}
}
DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
return -ENOENT;
case 2:
/* it's a destroy msg, free the handle */ /* it's a destroy msg, free the handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
radeon_bo_kunmap(bo); radeon_bo_kunmap(bo);
return 0; return 0;
} else {
/* it's a create msg, calc image size (width * height) */
img_size = msg[7] * msg[8];
radeon_bo_kunmap(bo);
if (msg_type != 0) { default:
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
return -EINVAL;
}
/* it's a create msg, no special handling needed */
}
/* create or decode, validate the handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
return 0;
}
/* handle not found try to alloc a new one */ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { return -EINVAL;
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p->rdev->uvd.filp[i] = p->filp;
p->rdev->uvd.img_size[i] = img_size;
return 0;
}
} }
DRM_ERROR("No more free UVD handles!\n"); BUG();
return -EINVAL; return -EINVAL;
} }
......
...@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, ...@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
* *
* @p: parser context * @p: parser context
* @handle: handle to validate * @handle: handle to validate
* @allocated: allocated a new handle?
* *
* Validates the handle and return the found session index or -EINVAL * Validates the handle and return the found session index or -EINVAL
* we we don't have another free session index. * we we don't have another free session index.
*/ */
int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
uint32_t handle, bool *allocated)
{ {
unsigned i; unsigned i;
*allocated = false;
/* validate the handle */ /* validate the handle */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
if (atomic_read(&p->rdev->vce.handles[i]) == handle) if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
if (p->rdev->vce.filp[i] != p->filp) {
DRM_ERROR("VCE handle collision detected!\n");
return -EINVAL;
}
return i; return i;
}
} }
/* handle not found try to alloc a new one */ /* handle not found try to alloc a new one */
...@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) ...@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
p->rdev->vce.filp[i] = p->filp; p->rdev->vce.filp[i] = p->filp;
p->rdev->vce.img_size[i] = 0; p->rdev->vce.img_size[i] = 0;
*allocated = true;
return i; return i;
} }
} }
...@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) ...@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
int radeon_vce_cs_parse(struct radeon_cs_parser *p) int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{ {
int session_idx = -1; int session_idx = -1;
bool destroyed = false; bool destroyed = false, created = false, allocated = false;
uint32_t tmp, handle = 0; uint32_t tmp, handle = 0;
uint32_t *size = &tmp; uint32_t *size = &tmp;
int i, r; int i, r = 0;
while (p->idx < p->chunk_ib->length_dw) { while (p->idx < p->chunk_ib->length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx); uint32_t len = radeon_get_ib_value(p, p->idx);
...@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
if ((len < 8) || (len & 3)) { if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len); DRM_ERROR("invalid VCE command length (%d)!\n", len);
return -EINVAL; r = -EINVAL;
goto out;
} }
if (destroyed) { if (destroyed) {
DRM_ERROR("No other command allowed after destroy!\n"); DRM_ERROR("No other command allowed after destroy!\n");
return -EINVAL; r = -EINVAL;
goto out;
} }
switch (cmd) { switch (cmd) {
case 0x00000001: // session case 0x00000001: // session
handle = radeon_get_ib_value(p, p->idx + 2); handle = radeon_get_ib_value(p, p->idx + 2);
session_idx = radeon_vce_validate_handle(p, handle); session_idx = radeon_vce_validate_handle(p, handle,
&allocated);
if (session_idx < 0) if (session_idx < 0)
return session_idx; return session_idx;
size = &p->rdev->vce.img_size[session_idx]; size = &p->rdev->vce.img_size[session_idx];
...@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
break; break;
case 0x01000001: // create case 0x01000001: // create
created = true;
if (!allocated) {
DRM_ERROR("Handle already in use!\n");
r = -EINVAL;
goto out;
}
*size = radeon_get_ib_value(p, p->idx + 8) * *size = radeon_get_ib_value(p, p->idx + 8) *
radeon_get_ib_value(p, p->idx + 10) * radeon_get_ib_value(p, p->idx + 10) *
8 * 3 / 2; 8 * 3 / 2;
...@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
*size); *size);
if (r) if (r)
return r; goto out;
r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
*size / 3); *size / 3);
if (r) if (r)
return r; goto out;
break; break;
case 0x02000001: // destroy case 0x02000001: // destroy
...@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
*size * 2); *size * 2);
if (r) if (r)
return r; goto out;
break; break;
case 0x05000004: // video bitstream buffer case 0x05000004: // video bitstream buffer
...@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) ...@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
tmp); tmp);
if (r) if (r)
return r; goto out;
break; break;
case 0x05000005: // feedback buffer case 0x05000005: // feedback buffer
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
4096); 4096);
if (r) if (r)
return r; goto out;
break; break;
default: default:
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
return -EINVAL; r = -EINVAL;
goto out;
} }
if (session_idx == -1) { if (session_idx == -1) {
DRM_ERROR("no session command at start of IB\n"); DRM_ERROR("no session command at start of IB\n");
return -EINVAL; r = -EINVAL;
goto out;
} }
p->idx += len / 4; p->idx += len / 4;
} }
if (destroyed) { if (allocated && !created) {
/* IB contains a destroy msg, free the handle */ DRM_ERROR("New session without create command!\n");
r = -ENOENT;
}
out:
if ((!r && destroyed) || (r && allocated)) {
/*
* IB contains a destroy msg or we have allocated an
* handle and got an error, anyway free the handle
*/
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
} }
return 0; return r;
} }
/** /**
......
...@@ -989,6 +989,9 @@ ...@@ -989,6 +989,9 @@
((n) & 0x3FFF) << 16) ((n) & 0x3FFF) << 16)
/* UVD */ /* UVD */
#define UVD_SEMA_ADDR_LOW 0xef00
#define UVD_SEMA_ADDR_HIGH 0xef04
#define UVD_SEMA_CMD 0xef08
#define UVD_GPCOM_VCPU_CMD 0xef0c #define UVD_GPCOM_VCPU_CMD 0xef0c
#define UVD_GPCOM_VCPU_DATA0 0xef10 #define UVD_GPCOM_VCPU_DATA0 0xef10
#define UVD_GPCOM_VCPU_DATA1 0xef14 #define UVD_GPCOM_VCPU_DATA1 0xef14
......
...@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, ...@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_semaphore *semaphore, struct radeon_semaphore *semaphore,
bool emit_wait) bool emit_wait)
{ {
uint64_t addr = semaphore->gpu_addr; /* disable semaphores for UVD V1 hardware */
return false;
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
return true;
} }
/** /**
......
...@@ -59,6 +59,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, ...@@ -59,6 +59,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 2); radeon_ring_write(ring, 2);
} }
/**
* uvd_v2_2_semaphore_emit - emit semaphore command
*
* @rdev: radeon_device pointer
* @ring: radeon_ring pointer
* @semaphore: semaphore to emit commands for
* @emit_wait: true if we should emit a wait command
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
return true;
}
/** /**
* uvd_v2_2_resume - memory controller programming * uvd_v2_2_resume - memory controller programming
* *
......
...@@ -173,7 +173,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) ...@@ -173,7 +173,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm->irq_enabled = true; drm->irq_enabled = true;
/* syncpoints are used for full 32-bit hardware VBLANK counters */ /* syncpoints are used for full 32-bit hardware VBLANK counters */
drm->vblank_disable_immediate = true;
drm->max_vblank_count = 0xffffffff; drm->max_vblank_count = 0xffffffff;
err = drm_vblank_init(drm, drm->mode_config.num_crtc); err = drm_vblank_init(drm, drm->mode_config.num_crtc);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment