Commit a2dbb7b5 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A bunch of change across the board, the main things are some vblank
  fallout in radeon and nouveau required some work, but I think this
  should fix it all.  There is also one drm fix for an oops in vmwgfx
  with how we pass the drm master around.

  The rest is just some amdgpu, i915, imx and rockchip fixes.

  Probably more than I'd like at this point, but hopefully things settle
  down now"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (40 commits)
  drm/amdgpu: Fixup hw vblank counter/ts for new drm_update_vblank_count() (v3)
  drm/radeon: Fixup hw vblank counter/ts for new drm_update_vblank_count() (v2)
  drm/radeon: Retry DDC probing on DVI on failure if we got an HPD interrupt
  drm/amdgpu: add spin lock to protect freed list in vm (v2)
  drm/amdgpu: partially revert "drm/amdgpu: fix VM_CONTEXT*_PAGE_TABLE_END_ADDR" v2
  drm/amdgpu: take a BO reference for the user fence
  drm/amdgpu: take a BO reference in the display code
  drm/amdgpu: set snooped flags only on system addresses v2
  drm/nouveau: Fix pre-nv50 pageflip events (v4)
  drm: Fix an unwanted master inheritance v2
  drm/amdgpu: fix race condition in amd_sched_entity_push_job
  drm/amdgpu: add err check for pin userptr
  drm/i915: take a power domain reference while checking the HDMI live status
  drm/i915: add MISSING_CASE to a few port/aux power domain helpers
  drm/i915/ddi: fix intel_display_port_aux_power_domain() after HDMI detect
  drm/i915: Introduce a gmbus power domain
  drm/i915: Clean up AUX power domain handling
  drm/rockchip: Use CRTC vblank event interface
  drm/rockchip: Fix module autoload for OF platform driver
  drm/rockchip: vop: fix window origin calculation
  ...
parents 9cfe5212 df4d4aa9
@@ -539,6 +539,7 @@ struct amdgpu_bo {
/* Constant after initialization */
struct amdgpu_device *adev;
struct drm_gem_object gem_base;
+struct amdgpu_bo *parent;
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
@@ -955,6 +956,8 @@ struct amdgpu_vm {
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
/* for interval tree */
spinlock_t it_lock;
+/* protecting freed */
+spinlock_t freed_lock;
};
struct amdgpu_vm_manager {
......
@@ -222,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
p->uf.bo = gem_to_amdgpu_bo(gobj);
+amdgpu_bo_ref(p->uf.bo);
+drm_gem_object_unreference_unlocked(gobj);
p->uf.offset = fence_data->offset;
} else {
ret = -EINVAL;
@@ -487,7 +489,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
if (parser->uf.bo)
-drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+amdgpu_bo_unref(&parser->uf.bo);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -776,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i]);
kfree(job->ibs);
if (job->uf.bo)
-drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+amdgpu_bo_unref(&job->uf.bo);
return 0;
}
......
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
unsigned i;
+int vpos, hpos, stat, min_udelay;
+struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl);
for (i = 0; i < work->shared_count; ++i)
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* We borrow the event spin lock for protecting flip_status */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
+/* If this happens to execute within the "virtually extended" vblank
+* interval before the start of the real vblank interval then it needs
+* to delay programming the mmio flip until the real vblank is entered.
+* This prevents completing a flip too early due to the way we fudge
+* our vblank counter and vblank timestamps in order to work around the
+* problem that the hw fires vblank interrupts before actual start of
+* vblank (when line buffer refilling is done for a frame). It
+* complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
+* timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
+*
+* In practice this won't execute very often unless on very fast
+* machines because the time window for this to happen is very small.
+*/
+for (;;) {
+/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+* start in hpos, and to the "fudged earlier" vblank start in
+* vpos.
+*/
+stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
+GET_DISTANCE_TO_VBLANKSTART,
+&vpos, &hpos, NULL, NULL,
+&crtc->hwmode);
+if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+!(vpos >= 0 && hpos <= 0))
+break;
+/* Sleep at least until estimated real start of hw vblank */
+spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+usleep_range(min_udelay, 2 * min_udelay);
+spin_lock_irqsave(&crtc->dev->event_lock, flags);
+};
/* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
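To make the sleep bound in the loop above concrete, here is a worked example with assumed numbers, not taken from the patch: at 1080p@60 a scanline lasts roughly 15 microseconds, so if the query reports the scanout three lines before the real vblank start, the loop waits about four line times before sampling the position again.

    int hpos = -3;              /* assumed: 3 lines before real vblank start */
    int linedur_ns = 14800;     /* assumed line duration for a 1080p@60 mode */
    int min_udelay = (-hpos + 1) * max(linedur_ns / 1000, 5); /* 4 * 14 = 56 us */

    usleep_range(min_udelay, 2 * min_udelay); /* sleep 56..112 us, then re-check */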
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");
-drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+amdgpu_bo_unref(&work->old_rbo);
kfree(work->shared);
kfree(work);
}
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
obj = old_amdgpu_fb->obj;
/* take a reference to the old object */
-drm_gem_object_reference(obj);
work->old_rbo = gem_to_amdgpu_bo(obj);
+amdgpu_bo_ref(work->old_rbo);
new_amdgpu_fb = to_amdgpu_framebuffer(fb);
obj = new_amdgpu_fb->obj;
@@ -222,7 +259,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
amdgpu_bo_unreserve(new_rbo);
cleanup:
-drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+amdgpu_bo_unref(&work->old_rbo);
fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* \param dev Device to query.
* \param pipe Crtc to query.
* \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+* For driver internal use only also supports these flags:
+*
+* USE_REAL_VBLANKSTART to use the real start of vblank instead
+* of a fudged earlier start of vblank.
+*
+* GET_DISTANCE_TO_VBLANKSTART to return distance to the
+* fudged earlier start of vblank in *vpos and the distance
+* to true start of vblank in *hpos.
+*
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
* \param *stime Target location for timestamp taken immediately before
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
vbl_end = 0;
}
+/* Called from driver internal vblank counter query code? */
+if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+/* Caller wants distance from real vbl_start in *hpos */
+*hpos = *vpos - vbl_start;
+}
+/* Fudge vblank to start a few scanlines earlier to handle the
+* problem that vblank irqs fire a few scanlines before start
+* of vblank. Some driver internal callers need the true vblank
+* start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+*
+* The cause of the "early" vblank irq is that the irq is triggered
+* by the line buffer logic when the line buffer read position enters
+* the vblank, whereas our crtc scanout position naturally lags the
+* line buffer read position.
+*/
+if (!(flags & USE_REAL_VBLANKSTART))
+vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
/* Test scanout position against vblank region. */
if ((*vpos < vbl_start) && (*vpos >= vbl_end))
in_vbl = false;
+/* In vblank? */
+if (in_vbl)
+ret |= DRM_SCANOUTPOS_IN_VBLANK;
+/* Called from driver internal vblank counter query code? */
+if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+/* Caller wants distance from fudged earlier vbl_start */
+*vpos -= vbl_start;
+return ret;
+}
/* Check if inside vblank area and apply corrective offsets:
* vpos will then be >=0 in video scanout area, but negative
* within vblank area, counting down the number of lines until
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* Correct for shifted end of vbl at vbl_end. */
*vpos = *vpos - vbl_end;
-/* In vblank? */
-if (in_vbl)
-ret |= DRM_SCANOUTPOS_IN_VBLANK;
-/* Is vpos outside nominal vblank area, but less than
-* 1/100 of a frame height away from start of vblank?
-* If so, assume this isn't a massively delayed vblank
-* interrupt, but a vblank interrupt that fired a few
-* microseconds before true start of vblank. Compensate
-* by adding a full frame duration to the final timestamp.
-* Happens, e.g., on ATI R500, R600.
-*
-* We only do this if DRM_CALLED_FROM_VBLIRQ.
-*/
-if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-vbl_start = mode->crtc_vdisplay;
-vtotal = mode->crtc_vtotal;
-if (vbl_start - *vpos < vtotal / 100) {
-*vpos -= vtotal;
-/* Signal this correction as "applied". */
-ret |= 0x8;
-}
-}
return ret;
}
......
@@ -235,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_USERPTR_REGISTER))
return -EINVAL;
-if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-!(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+!(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
/* if we want to write to it we must require anonymous
memory and install a MMU notifier */
......
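The tightened check above means a write-capable userptr BO must be backed by anonymous memory and registered with an MMU notifier, while a read-only import is exempt. A hedged sketch of a conforming userspace call, using the existing UAPI flag names; the buffer, fd, and size are assumed:

    /* assumed: 'buf' is a page-aligned anonymous allocation, 'fd' an open render node */
    struct drm_amdgpu_gem_userptr args = {
        .addr  = (__u64)(uintptr_t)buf,
        .size  = 4096,
        .flags = AMDGPU_GEM_USERPTR_ANONONLY |  /* both required for a     */
                 AMDGPU_GEM_USERPTR_REGISTER,   /* writable import         */
    };
    /* a read-only import may instead pass AMDGPU_GEM_USERPTR_READONLY alone */
    int ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_USERPTR, &args, sizeof(args));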
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
struct amdgpu_device *adev = dev->dev_private;
+int vpos, hpos, stat;
+u32 count;
if (pipe >= adev->mode_info.num_crtc) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
-return amdgpu_display_vblank_get_counter(adev, pipe);
+/* The hw increments its frame counter at start of vsync, not at start
+* of vblank, as is required by DRM core vblank counter handling.
+* Cook the hw count here to make it appear to the caller as if it
+* incremented at start of vblank. We measure distance to start of
+* vblank in vpos. vpos therefore will be >= 0 between start of vblank
+* and start of vsync, so vpos >= 0 means to bump the hw frame counter
+* result by 1 to give the proper appearance to caller.
+*/
+if (adev->mode_info.crtcs[pipe]) {
+/* Repeat readout if needed to provide stable result if
+* we cross start of vsync during the queries.
+*/
+do {
+count = amdgpu_display_vblank_get_counter(adev, pipe);
+/* Ask amdgpu_get_crtc_scanoutpos to return vpos as
+* distance to start of vblank, instead of regular
+* vertical scanout pos.
+*/
+stat = amdgpu_get_crtc_scanoutpos(
+dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
+&vpos, &hpos, NULL, NULL,
+&adev->mode_info.crtcs[pipe]->base.hwmode);
+} while (count != amdgpu_display_vblank_get_counter(adev, pipe));
+if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+} else {
+DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+pipe, vpos);
+/* Bump counter if we are at >= leading edge of vblank,
+* but before vsync where vpos would turn negative and
+* the hw counter really increments.
+*/
+if (vpos >= 0)
+count++;
+}
+} else {
+/* Fallback to use value as is. */
+count = amdgpu_display_vblank_get_counter(adev, pipe);
+DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+}
+return count;
}
/**
......
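Distilled into a hedged standalone sketch (the function name is mine, not from the patch), the cooking rule the hunk above implements is:

    /* The hw counter steps at start of vsync; DRM core wants a counter
     * that steps at start of vblank. Between vblank start and vsync the
     * distance to vblank start is still >= 0, so bump the result by one
     * in that window. */
    static u32 cooked_vblank_count(u32 hw_count, int dist_to_vblankstart)
    {
        return dist_to_vblankstart >= 0 ? hw_count + 1 : hw_count;
    }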
@@ -407,6 +407,7 @@ struct amdgpu_crtc {
u32 line_time;
u32 wm_low;
u32 wm_high;
+u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
};
@@ -528,6 +529,10 @@ struct amdgpu_framebuffer {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
+/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART (1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
void amdgpu_link_encoder_connector(struct drm_device *dev);
struct drm_connector *
......
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->adev->gem.mutex);
drm_gem_object_release(&bo->gem_base);
+amdgpu_bo_unref(&bo->parent);
kfree(bo->metadata);
kfree(bo);
}
......
@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
int r;
-if (gtt->userptr)
-amdgpu_ttm_tt_pin_userptr(ttm);
+if (gtt->userptr) {
+r = amdgpu_ttm_tt_pin_userptr(ttm);
+if (r) {
+DRM_ERROR("failed to pin userptr\n");
+return r;
+}
+}
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
-if (mem && mem->mem_type == TTM_PL_TT)
+if (mem && mem->mem_type == TTM_PL_TT) {
flags |= AMDGPU_PTE_SYSTEM;
-if (!ttm || ttm->caching_state == tt_cached)
-flags |= AMDGPU_PTE_SNOOPED;
+if (ttm->caching_state == tt_cached)
+flags |= AMDGPU_PTE_SNOOPED;
+}
if (adev->asic_type >= CHIP_TOPAZ)
flags |= AMDGPU_PTE_EXECUTABLE;
......
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
int r;
+spin_lock(&vm->freed_lock);
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
+spin_unlock(&vm->freed_lock);
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
kfree(mapping);
if (r)
return r;
+spin_lock(&vm->freed_lock);
}
+spin_unlock(&vm->freed_lock);
return 0;
}
@@ -1079,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (r)
goto error_free;
+/* Keep a reference to the page table to avoid freeing
+* them up in the wrong order.
+*/
+pt->parent = amdgpu_bo_ref(vm->page_directory);
r = amdgpu_vm_clear_bo(adev, pt);
if (r) {
amdgpu_bo_unref(&pt);
@@ -1150,10 +1159,13 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
-if (valid)
+if (valid) {
+spin_lock(&vm->freed_lock);
list_add(&mapping->list, &vm->freed);
-else
+spin_unlock(&vm->freed_lock);
+} else {
kfree(mapping);
+}
return 0;
}
@@ -1186,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
+spin_lock(&vm->freed_lock);
list_add(&mapping->list, &vm->freed);
+spin_unlock(&vm->freed_lock);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
@@ -1247,6 +1261,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
spin_lock_init(&vm->it_lock);
+spin_lock_init(&vm->freed_lock);
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
......
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
-u32 tmp, wm_mask;
+u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+/* Save number of lines the linebuffer leads before the scanout */
+amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
......
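The same lb_vblank_lead_lines bookkeeping repeats in the two watermark functions below. The DIV_ROUND_UP() expresses how many scanlines of data the line buffer holds ahead of the scanout position; a numeric illustration with assumed sizes:

    /* Assumed values for illustration only: a line buffer of 3840 pixel
     * entries feeding a 1920-wide mode leads the scanout by 2 lines, so
     * the vblank irq fires 2 lines before the real start of vblank. */
    u32 lb_size = 3840, hdisplay = 1920;
    u32 lead = DIV_ROUND_UP(lb_size, hdisplay); /* (3840 + 1919) / 1920 = 2 */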
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
-u32 tmp, wm_mask;
+u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+/* Save number of lines the linebuffer leads before the scanout */
+amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
......
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
-u32 tmp, wm_mask;
+u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+/* Save number of lines the linebuffer leads before the scanout */
+amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
......
@@ -513,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL3, tmp);
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
......
@@ -657,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL4, tmp);
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
......
@@ -288,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
*/
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
+struct amd_gpu_scheduler *sched = sched_job->sched;
struct amd_sched_entity *entity = sched_job->s_entity;
bool added, first = false;
@@ -302,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
/* first job wakes up scheduler */
if (first)
-amd_sched_wakeup(sched_job->sched);
+amd_sched_wakeup(sched);
return added;
}
@@ -318,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
+trace_amd_sched_job(sched_job);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
-trace_amd_sched_job(sched_job);
}
/**
......
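The two scheduler hunks above close a use-after-free: once amd_sched_entity_in() publishes the job, the scheduler thread may run it and free it, so neither the wakeup nor the tracepoint may touch sched_job afterwards. The shape of the fix, as a hedged generic sketch with invented names:

    static bool push_job(struct job *sched_job)
    {
        /* read everything needed *before* the job becomes visible */
        struct scheduler *sched = sched_job->sched;
        bool first = queue_add(sched_job); /* job may be freed from here on */

        if (first)
            wake_up_scheduler(sched);      /* no sched_job dereference here */
        return first;
    }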
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
+if (!file_priv->allowed_master) {
+ret = drm_new_set_master(dev, file_priv);
+goto out_unlock;
+}
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
if (dev->driver->master_set) {
......
@@ -125,6 +125,60 @@ static int drm_cpu_valid(void)
return 1;
}
+/**
+* drm_new_set_master - Allocate a new master object and become master for the
+* associated master realm.
+*
+* @dev: The associated device.
+* @fpriv: File private identifying the client.
+*
+* This function must be called with dev::struct_mutex held.
+* Returns negative error code on failure. Zero on success.
+*/
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+struct drm_master *old_master;
+int ret;
+lockdep_assert_held_once(&dev->master_mutex);
+/* create a new master */
+fpriv->minor->master = drm_master_create(fpriv->minor);
+if (!fpriv->minor->master)
+return -ENOMEM;
+/* take another reference for the copy in the local file priv */
+old_master = fpriv->master;
+fpriv->master = drm_master_get(fpriv->minor->master);
+if (dev->driver->master_create) {
+ret = dev->driver->master_create(dev, fpriv->master);
+if (ret)
+goto out_err;
+}
+if (dev->driver->master_set) {
+ret = dev->driver->master_set(dev, fpriv, true);
+if (ret)
+goto out_err;
+}
+fpriv->is_master = 1;
+fpriv->allowed_master = 1;
+fpriv->authenticated = 1;
+if (old_master)
+drm_master_put(&old_master);
+return 0;
+out_err:
+/* drop both references and restore old master on failure */
+drm_master_put(&fpriv->minor->master);
+drm_master_put(&fpriv->master);
+fpriv->master = old_master;
+return ret;
+}
/**
* Called whenever a process opens /dev/drm.
*
@@ -189,35 +243,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
mutex_lock(&dev->master_mutex);
if (drm_is_primary_client(priv) && !priv->minor->master) {
/* create a new master */
-priv->minor->master = drm_master_create(priv->minor);
-if (!priv->minor->master) {
-ret = -ENOMEM;
+ret = drm_new_set_master(dev, priv);
+if (ret)
goto out_close;
-}
-priv->is_master = 1;
-/* take another reference for the copy in the local file priv */
-priv->master = drm_master_get(priv->minor->master);
-priv->authenticated = 1;
-if (dev->driver->master_create) {
-ret = dev->driver->master_create(dev, priv->master);
-if (ret) {
-/* drop both references if this fails */
-drm_master_put(&priv->minor->master);
-drm_master_put(&priv->master);
-goto out_close;
-}
-}
-if (dev->driver->master_set) {
-ret = dev->driver->master_set(dev, priv, true);
-if (ret) {
-/* drop both references if this fails */
-drm_master_put(&priv->minor->master);
-drm_master_put(&priv->master);
-goto out_close;
-}
-}
} else if (drm_is_primary_client(priv)) {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
......
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
unsigned long seq, struct timeval *now)
{
-WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+assert_spin_locked(&dev->event_lock);
e->event.sequence = seq;
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
@@ -992,6 +993,57 @@ static void send_vblank_event(struct drm_device *dev,
e->event.sequence);
}
+/**
+* drm_arm_vblank_event - arm vblank event after pageflip
+* @dev: DRM device
+* @pipe: CRTC index
+* @e: the event to prepare to send
+*
+* A lot of drivers need to generate vblank events for the very next vblank
+* interrupt. For example when the page flip interrupt happens when the page
+* flip gets armed, but not when it actually executes within the next vblank
+* period. This helper function implements exactly the required vblank arming
+* behaviour.
+*
+* Caller must hold event lock. Caller must also hold a vblank reference for
+* the event @e, which will be dropped when the next vblank arrives.
+*
+* This is the legacy version of drm_crtc_arm_vblank_event().
+*/
+void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+struct drm_pending_vblank_event *e)
+{
+assert_spin_locked(&dev->event_lock);
+e->pipe = pipe;
+e->event.sequence = drm_vblank_count(dev, pipe);
+list_add_tail(&e->base.link, &dev->vblank_event_list);
+}
+EXPORT_SYMBOL(drm_arm_vblank_event);
+/**
+* drm_crtc_arm_vblank_event - arm vblank event after pageflip
+* @crtc: the source CRTC of the vblank event
+* @e: the event to send
+*
+* A lot of drivers need to generate vblank events for the very next vblank
+* interrupt. For example when the page flip interrupt happens when the page
+* flip gets armed, but not when it actually executes within the next vblank
+* period. This helper function implements exactly the required vblank arming
+* behaviour.
+*
+* Caller must hold event lock. Caller must also hold a vblank reference for
+* the event @e, which will be dropped when the next vblank arrives.
+*
+* This is the native KMS version of drm_arm_vblank_event().
+*/
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+struct drm_pending_vblank_event *e)
+{
+drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
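A hedged sketch of the call pattern these two helpers are meant for, from a driver's flip handler; drm_crtc_vblank_get() and the event lock are the existing DRM APIs, the surrounding code is illustrative:

    spin_lock_irqsave(&dev->event_lock, flags);
    if (event) {
        /* hold a vblank reference; it is dropped when the event fires */
        WARN_ON(drm_crtc_vblank_get(crtc) != 0);
        drm_crtc_arm_vblank_event(crtc, event); /* sent on the next vblank */
    }
    spin_unlock_irqrestore(&dev->event_lock, flags);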
/**
* drm_send_vblank_event - helper to send vblank event after pageflip
* @dev: DRM device
......
@@ -2734,6 +2734,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "AUX_C";
case POWER_DOMAIN_AUX_D:
return "AUX_D";
+case POWER_DOMAIN_GMBUS:
+return "GMBUS";
case POWER_DOMAIN_INIT:
return "INIT";
default:
......
@@ -199,6 +199,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
+POWER_DOMAIN_GMBUS,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
......