Commit 218c872b authored by Dave Airlie

Merge tag 'drm-intel-next-2012-05-06-merged' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next

Daniel says:

Highlights:
- sparse fixes from Ben.
- tons of little cleanups from Chris all over: tiling_changed
 clarification, deferred_free list removal, ...
- fix up irq handler on gen2 & gen3 + related cleanups from Chris
- prep work for wait_rendering_timeout from Ben with some nice
 refactorings
- first set of infoframe fixes from Paulo for double-clocked CEA modes
- improve pch pll handling from Jesse and Chris
- gpu hangman, this also contains the reset fix for gen4
- rps sanity check from Chris - this papers over issues when the gpu fails
 to clock up on snb/ivb, and it is shockingly easy to hit. The code
 prints a big WARN backtrace and restores the hw to a sane state. The
 real fix is still in the works.

Atm I'm aware of 2 regressions in -next:
- One of the gmbus patches (not gmbus itself) regressed lvds detection on
 a MacBook Pro. I've analyzed the bug already and I think I know what's
 going on, patch is awaiting test feedback.
- Just today QA reported that DP on ilk regressed. That bug is fresh off
 the press and still awaiting detailed logfiles and the bisect result.
 The only thing that's clear atm is that -fixes works and -next doesn't.
parents b06d66be 5e13a0c5
......@@ -11,6 +11,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_sysfs.o \
i915_trace_points.o \
......
......@@ -377,18 +377,23 @@ void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
pci_dev_put(pch);
}
......@@ -433,7 +438,7 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(FORCEWAKE_MT);
count = 0;
......@@ -475,7 +480,7 @@ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
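
The two forcewake hunks above replace open-coded masked writes with the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() macros added to i915_reg.h later in this diff. On such registers the upper 16 bits are a write-enable mask: only bits whose mask bit is set actually change. A minimal standalone check of the expansion (macro bodies copied from the i915_reg.h hunk below):

    #include <assert.h>

    #define _MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
    #define _MASKED_BIT_DISABLE(a) ((a) << 16)

    int main(void)
    {
            /* Enable bit 0: mask bit 16 set, value bit 0 set. */
            assert(_MASKED_BIT_ENABLE(1) == ((1 << 16) | 1));
            /* Disable bit 0: mask bit 16 set, value bit 0 clear. */
            assert(_MASKED_BIT_DISABLE(1) == ((1 << 16) | 0));
            return 0;
    }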
......@@ -668,7 +673,7 @@ int i915_resume(struct drm_device *dev)
return 0;
}
static int i8xx_do_reset(struct drm_device *dev, u8 flags)
static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -702,11 +707,12 @@ static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
return gdrst & 0x1;
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int i965_do_reset(struct drm_device *dev, u8 flags)
static int i965_do_reset(struct drm_device *dev)
{
int ret;
u8 gdrst;
/*
......@@ -715,20 +721,43 @@ static int i965_do_reset(struct drm_device *dev, u8 flags)
* triggers the reset; when done, the hardware will clear it.
*/
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
pci_write_config_byte(dev->pdev, I965_GDRST,
gdrst | GRDOM_RENDER |
GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
/* We can't reset render&media without also resetting display ... */
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
pci_write_config_byte(dev->pdev, I965_GDRST,
gdrst | GRDOM_MEDIA |
GRDOM_RESET_ENABLE);
return wait_for(i965_reset_complete(dev), 500);
}
static int ironlake_do_reset(struct drm_device *dev, u8 flags)
static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
u32 gdrst;
int ret;
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
if (ret)
return ret;
/* We can't reset render&media without also resetting display ... */
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
static int gen6_do_reset(struct drm_device *dev, u8 flags)
static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
......@@ -763,10 +792,44 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
return ret;
}
static int intel_gpu_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = -ENODEV;
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
ret = gen6_do_reset(dev);
break;
case 5:
ret = ironlake_do_reset(dev);
break;
case 4:
ret = i965_do_reset(dev);
break;
case 2:
ret = i8xx_do_reset(dev);
break;
}
/* Also reset the gpu hangman. */
if (dev_priv->stop_rings) {
DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
dev_priv->stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
}
return ret;
}
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
* @flags: reset domains
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
......@@ -779,14 +842,9 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
* - re-init interrupt state
* - re-init display
*/
int i915_reset(struct drm_device *dev, u8 flags)
int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/*
* We really should only reset the display subsystem if we actually
* need to
*/
bool need_display = true;
int ret;
if (!i915_try_reset)
......@@ -795,26 +853,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
dev_priv->stop_rings = 0;
i915_gem_reset(dev);
ret = -ENODEV;
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
if (get_seconds() - dev_priv->last_gpu_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
ret = gen6_do_reset(dev, flags);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
break;
case 4:
ret = i965_do_reset(dev, flags);
break;
case 2:
ret = i8xx_do_reset(dev, flags);
break;
}
else
ret = intel_gpu_reset(dev);
dev_priv->last_gpu_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
......@@ -856,23 +904,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
intel_modeset_init_hw(dev);
drm_irq_uninstall(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
mutex_lock(&dev->struct_mutex);
}
mutex_unlock(&dev->struct_mutex);
/*
* Perform a full modeset as on later generations, e.g. Ironlake, we may
* need to retrain the display link and cannot just restore the register
* values.
*/
if (need_display) {
mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
mutex_unlock(&dev->mode_config.mutex);
} else {
mutex_unlock(&dev->struct_mutex);
}
return 0;
......
......@@ -39,6 +39,7 @@
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
/* General customization:
*/
......@@ -78,6 +79,16 @@ enum port {
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
struct intel_pch_pll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
int pll_reg;
int fp0_reg;
int fp1_reg;
};
#define I915_NUM_PLLS 2
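
A rough sketch of how these fields are meant to be used for PLL sharing (the helper below is hypothetical, not the one this series adds; it only illustrates the refcount/slot idea):

    /* Hypothetical illustration only. */
    static struct intel_pch_pll *
    pch_pll_get_sketch(struct drm_i915_private *dev_priv, u32 fp, u32 dpll)
    {
            int i;

            /* Prefer sharing a PLL already programmed with matching timings. */
            for (i = 0; i < I915_NUM_PLLS; i++) {
                    struct intel_pch_pll *pll = &dev_priv->pch_plls[i];
                    if (pll->refcount &&
                        I915_READ(pll->fp0_reg) == fp &&
                        I915_READ(pll->pll_reg) == dpll) {
                            pll->refcount++;
                            return pll;
                    }
            }

            /* Otherwise claim an unused slot, if any remain. */
            for (i = 0; i < I915_NUM_PLLS; i++) {
                    struct intel_pch_pll *pll = &dev_priv->pch_plls[i];
                    if (pll->refcount == 0) {
                            pll->refcount = 1;
                            return pll;
                    }
            }
            return NULL;
    }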
/* Interface history:
*
* 1.1: Original.
......@@ -122,11 +133,11 @@ struct opregion_asle;
struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
void *vbt;
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
......@@ -161,8 +172,11 @@ struct sdvo_device_mapping {
struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
u32 eir;
u32 pgtbl_er;
u32 ier;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
......@@ -228,11 +242,13 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
void (*sanitize_pm)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
......@@ -328,7 +344,6 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
int has_gem;
int relative_constants_mode;
void __iomem *regs;
......@@ -357,7 +372,6 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
......@@ -386,11 +400,9 @@ typedef struct drm_i915_private {
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
int tex_lru_log_granularity;
int allow_batchbuffer;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
......@@ -402,6 +414,8 @@ typedef struct drm_i915_private {
uint32_t last_instdone;
uint32_t last_instdone1;
unsigned int stop_rings;
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
......@@ -453,6 +467,7 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
......@@ -677,23 +692,9 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
/**
* LRU list of objects which are not in the ringbuffer but
* are still pinned in the GTT.
*/
struct list_head pinned_list;
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
/**
* List of objects currently pending being freed.
*
* These objects are no longer in use, but due to a signal
* we were prevented from freeing them at the appointed time.
*/
struct list_head deferred_free_list;
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
......@@ -742,6 +743,16 @@ typedef struct drm_i915_private {
size_t object_memory;
u32 object_count;
} mm;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
} dri1;
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
......@@ -751,7 +762,8 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
bool flip_pending_is_done;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
/* Reclocking support */
bool render_reclock_avail;
......@@ -869,7 +881,14 @@ struct drm_i915_gem_object {
* Current tiling mode for the object.
*/
unsigned int tiling_mode:2;
unsigned int tiling_changed:1;
/**
* Whether the tiling parameters for the currently associated fence
* register have changed. Note that for the purposes of tracking
* tiling changes we also treat the unfenced register, the register
* slot that the object occupies whilst it executes a fenced
* command (such as BLT on gen2/3), as a "fence".
*/
unsigned int fence_dirty:1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
......@@ -1116,6 +1135,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
......@@ -1133,7 +1153,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
extern int i915_reset(struct drm_device *dev, u8 flags);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
......@@ -1143,19 +1163,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_irq_init(struct drm_device *dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_error_state_free(struct kref *error_ref);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
......@@ -1287,18 +1298,18 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
uint32_t seqno,
bool do_retire);
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
......@@ -1349,10 +1360,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable);
int __must_check i915_gem_evict_everything(struct drm_device *dev,
bool purgeable_only);
int __must_check i915_gem_evict_inactive(struct drm_device *dev,
bool purgeable_only);
int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
......@@ -1467,28 +1479,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
struct intel_display_error_state *error);
#endif
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
#define BEGIN_LP_RING(n) \
intel_ring_begin(LP_RING(dev_priv), (n))
#define OUT_RING(x) \
intel_ring_emit(LP_RING(dev_priv), x)
#define ADVANCE_LP_RING() \
intel_ring_advance(LP_RING(dev_priv))
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
if (LP_RING(dev->dev_private)->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
* returned.
......
......@@ -114,22 +114,6 @@ i915_verify_lists(struct drm_device *dev)
}
}
list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed pinned %p\n", obj);
err++;
break;
} else if (!obj->pin_count || obj->active ||
(obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
obj,
obj->pin_count, obj->active,
obj->base.write_domain);
err++;
}
}
return warned = err;
}
#endif /* WATCH_INACTIVE */
......
......@@ -35,6 +35,9 @@
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
if (obj->pin_count)
return false;
list_add(&obj->exec_list, unwind);
return drm_mm_scan_add_block(obj->gtt_space);
}
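
mark_free() above feeds eviction candidates into the drm_mm scanner while also queuing them on a caller-owned unwind list; once a fitting hole is found, everything the scan did not pick is rolled back. A condensed sketch of that pattern, assuming the 2012-era drm_mm scan API:

    /* Sketch of the scan/unwind pattern in i915_gem_evict_something();
     * error paths omitted. */
    drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

    list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
            if (mark_free(obj, &unwind_list))
                    goto found;
    /* ... the active and flushing lists are scanned here too ... */

    found:
            list_for_each_entry_safe(obj, next, &unwind_list, exec_list) {
                    /* Returns true for blocks the scan selected for
                     * eviction; the rest just leave the unwind list. */
                    if (!drm_mm_scan_remove_block(obj->gtt_space))
                            list_del_init(&obj->exec_list);
            }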
......@@ -90,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj->base.write_domain || obj->pin_count)
if (obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
......@@ -99,14 +102,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (obj->pin_count)
continue;
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (!obj->base.write_domain || obj->pin_count)
if (!obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
......@@ -166,8 +166,9 @@ int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
struct drm_i915_gem_object *obj, *next;
bool lists_empty;
int ret, i;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
......@@ -177,31 +178,30 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only);
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev, true);
ret = i915_gpu_idle(dev);
if (ret)
return ret;
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
* with retire requests.
*/
for (i = 0; i < I915_NUM_RINGS; i++)
if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list)))
return -EBUSY;
return i915_gem_evict_inactive(dev, purgeable_only);
}
i915_gem_retire_requests(dev);
/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
int ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
if (obj->pin_count == 0)
WARN_ON(i915_gem_object_unbind(obj));
}
}
return 0;
return ret;
}
......@@ -1116,11 +1116,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
......@@ -1225,9 +1231,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
ret = i915_gpu_idle(dev, true);
ret = i915_gpu_idle(dev);
if (ret)
goto err;
i915_gem_retire_requests(dev);
BUG_ON(ring->sync_seqno[i]);
}
......
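
The new num_cliprects > UINT_MAX / sizeof(*cliprects) test above keeps the multiplication inside kmalloc() from wrapping: on 32-bit, a count around 0x20000000 times the 8-byte drm_clip_rect would overflow to a tiny allocation that the later copy would overrun. The same guard in standalone form (struct layout here is illustrative):

    #include <limits.h>
    #include <stdlib.h>

    struct clip_rect { unsigned short x1, y1, x2, y2; }; /* 8 bytes */

    static void *alloc_clip_rects(unsigned int count)
    {
            /* Reject counts whose byte size would wrap before malloc. */
            if (count > UINT_MAX / sizeof(struct clip_rect))
                    return NULL;
            return malloc(count * sizeof(struct clip_rect));
    }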
......@@ -317,7 +317,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev, false)) {
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
......
/*
* Copyright © 2008-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
* use by the OS and so the user finds that his system has less memory
* available than he put in. We refer to this memory as stolen.
*
* The BIOS will allocate its framebuffer from the stolen memory. Our
* goal is try to reuse that object for our own fbcon which must always
* be available for panics. Anything else we can reuse the stolen memory
* for is a boon.
*/
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED (3 << 1)
#define PTE_MAPPING_TYPE_MASK (3 << 1)
#define PTE_VALID (1 << 0)
/**
* i915_stolen_to_phys - take an offset into stolen memory and turn it into
* a physical one
* @dev: drm device
* @offset: address to translate
*
* Some chip functions require allocations from stolen space and need the
* physical address of the memory in question.
*/
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base;
#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
* is unreliable, so compute the base by subtracting the stolen memory
* from the Top of Low Usable DRAM which is where the BIOS places
* the graphics stolen memory.
*/
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
/* top 32bits are reserved = 0 */
pci_read_config_dword(pdev, 0xA4, &base);
} else {
/* XXX presume 8xx is the same as i915 */
pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
}
#else
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
u16 val;
pci_read_config_word(pdev, 0xb0, &val);
base = val >> 4 << 20;
} else {
u8 val;
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
}
base -= dev_priv->mm.gtt->stolen_size;
#endif
return base + offset;
}
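
To make the fallback decoding concrete: on gen4+/G33 the 16-bit config word at 0xb0 carries the stolen-memory base in 1 MiB units starting at bit 4, so val >> 4 << 20 rebuilds the byte address; older parts use an 8-bit register at 0x9c with 128 MiB granularity at bit 3. Worked through with invented register values:

    /* gen4+/G33: val = 0x07f3 read from 0xb0, bits 15:4 = 0x07f. */
    u32 base     = 0x07f3 >> 4 << 20;  /* 0x07f << 20 = 0x07f00000 = 127 MiB */

    /* pre-gen4: val = 0x28 read from 0x9c, bits 7:3 = 5. */
    u32 base_old = 0x28 >> 3 << 27;    /* 5 << 27 = 0x28000000 = 640 MiB */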
static void i915_warn_stolen(struct drm_device *dev)
{
DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
unsigned long cfb_base;
unsigned long ll_base = 0;
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
if (!cfb_base)
goto err_fb;
if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
compressed_llb = drm_mm_get_block(compressed_llb,
4096, 4096);
if (!compressed_llb)
goto err_fb;
ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
if (!ll_base)
goto err_llb;
}
dev_priv->cfb_size = size;
dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
cfb_base, ll_base, size >> 20);
return;
err_llb:
drm_mm_put_block(compressed_llb);
err_fb:
drm_mm_put_block(compressed_fb);
err:
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_mm_put_block(dev_priv->compressed_fb);
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
}
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
/* Leave 1M for line length buffer & misc. */
/* Try to get a 32M buffer... */
if (prealloc_size > (36*1024*1024))
cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
return 0;
}
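
Worked through with two representative stolen sizes to show the sizing rule above:

    /* prealloc = 64 MiB: 64 > 36, so cfb_size = 32 MiB (the surplus
     * covers the line length buffer & misc).
     * prealloc = 32 MiB: 32 <= 36, so cfb_size = 32 * 7 / 8 = 28 MiB. */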
......@@ -354,9 +354,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
* need to ensure that any fence register is updated before
* the next fenced (either through the GTT or by the BLT unit
* on older GPUs) access.
*
* After updating the tiling parameters, we then flag whether
* we need to update an associated fence register. Note this
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
i915_gem_release_mmap(obj);
obj->map_and_fenceable =
obj->gtt_space == NULL ||
......@@ -374,9 +380,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
if (ret == 0) {
obj->tiling_changed = true;
obj->fence_dirty =
obj->fenced_gpu_access ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
......
......@@ -29,6 +29,9 @@
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
......@@ -79,6 +82,7 @@
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_RESET_ENABLE (1<<0)
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
......@@ -425,8 +429,6 @@
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
......@@ -514,9 +516,6 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
......@@ -572,7 +571,6 @@
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
/* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
......@@ -637,7 +635,6 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
......@@ -3224,11 +3221,14 @@
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
#define DE_PIPEC_VBLANK_IVB (1<<10)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEB_VBLANK_IVB (1<<5)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
......@@ -3402,15 +3402,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
......
......@@ -40,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
return false;
if (HAS_PCH_SPLIT(dev))
dpll_reg = PCH_DPLL(pipe);
dpll_reg = _PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
......
......@@ -688,7 +688,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
......@@ -702,24 +702,30 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = intel_dp->panel_fixed_mode->clock;
}
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], mode->clock);
if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
mode_rate = intel_dp_link_required(mode->clock, bpp);
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (intel_dp_link_required(mode->clock, bpp)
<= link_avail) {
if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
DRM_DEBUG_KMS("DP link bw %02x lane "
"count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
adjusted_mode->clock, bpp);
DRM_DEBUG_KMS("DP link bw required %i available %i\n",
mode_rate, link_avail);
return true;
}
}
......@@ -2439,6 +2445,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
......
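
To see what the new mode_rate/link_avail debug output compares, assume the usual formulas from this era of the driver (assumptions, not shown in the hunk): intel_dp_link_required() is roughly clock * bpp / 8, and intel_dp_max_data_rate() is link_clock * lanes * 8 / 10, the latter discounting 8b/10b coding. For 1080p@60 (148.5 MHz) at 24 bpp:

    /* Back-of-envelope check; formulas assumed, units kHz-scaled. */
    int mode_rate = 148500 * 24 / 8;      /* 445500 */
    int avail_1   = 270000 * 1 * 8 / 10;  /* 216000: 1 lane @ 2.7 GHz, too slow */
    int avail_2   = 270000 * 2 * 8 / 10;  /* 432000: 2 lanes, still too slow    */
    int avail_4   = 270000 * 4 * 8 / 10;  /* 864000: 4 lanes satisfy the mode   */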
......@@ -183,8 +183,8 @@ struct intel_crtc {
bool cursor_visible;
unsigned int bpp;
bool no_pll; /* tertiary pipe for IVB */
bool use_pll_a;
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
};
struct intel_plane {
......@@ -238,6 +238,8 @@ struct cxsr_latency {
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
......@@ -271,8 +273,8 @@ struct dip_infoframe {
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
/* PB5 - PR 3:0 */
uint8_t PR;
/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
......@@ -346,6 +348,8 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
void intel_sanitize_pm(struct drm_device *dev);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
......@@ -405,10 +409,6 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
extern void ironlake_disable_rc6(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
......@@ -466,5 +466,13 @@ extern void intel_init_pm(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
......@@ -220,7 +220,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
intel_hdmi->write_infoframe(encoder, frame);
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
......@@ -228,6 +229,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
.len = DIP_LEN_AVI,
};
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
intel_set_infoframe(encoder, &avi_if);
}
......@@ -290,7 +294,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
intel_hdmi_set_avi_infoframe(encoder);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
......
......@@ -2,7 +2,7 @@
#define _INTEL_RINGBUFFER_H_
struct intel_hw_status_page {
u32 __iomem *page_addr;
u32 *page_addr;
unsigned int gfx_addr;
struct drm_i915_gem_object *obj;
};
......@@ -56,12 +56,9 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
spinlock_t irq_lock;
u32 irq_refcount;
u32 irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 irq_seqno; /* last seqno seen at irq time */
u32 trace_irq_seqno;
u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
......@@ -118,7 +115,6 @@ struct intel_ring_buffer {
u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
drm_local_map_t map;
void *private;
};
......@@ -152,7 +148,9 @@ static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
int reg)
{
return ioread32(ring->status_page.page_addr + reg);
/* Ensure that the compiler doesn't optimize away the load. */
barrier();
return ring->status_page.page_addr[reg];
}
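
The rewrite above works because the status page now lives in ordinary cacheable memory (note page_addr losing __iomem at the top of this header), so a plain load is fine; the barrier() merely stops the compiler from caching the value across a polling loop. A small sketch of the failure it prevents:

    /* Without the barrier a compiler may hoist the load out of the loop
     * and spin on a stale copy forever; barrier() forces a re-read.
     * (Sketch only; 'status' stands in for the hardware-written page.) */
    static void wait_for_seqno(u32 *status, int reg, u32 seqno)
    {
            while (status[reg] < seqno)
                    barrier();   /* compiler barrier, not a CPU fence */
    }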
/**
......@@ -170,10 +168,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
......