Commit 4821ff14 authored by Dave Airlie

Merge tag 'drm-intel-next-2013-09-21-merged' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

drm-intel-next-2013-09-21:
- clock state handling rework from Ville
- l3 parity handling fixes for hsw from Ben
- some more watermark improvements from Ville
- ban badly behaved contexts from Mika
- a few vlv improvements from Jesse
- VGA power domain handling from Ville
drm-intel-next-2013-09-06:
- Basic MIPI DSI support from Jani. Not yet converted over to drm_bridge
  since that was too fresh, but the porting is already in progress.
- More vma patches from Ben, this time the code to convert the execbuffer
  code. Now that the shrinker recursion bug is tracked down we can move
  ahead here again. Yay!
- Optimize hw context switching to not generate needless interrupts (Chris
  Wilson). Also some shuffling of the outstanding request allocation.
- Opregion support for SWSCI, although not yet fully wired up (we need a
  bit of runtime D3 support for that apparently, due to Windows design
  deficiencies), from Jani Nikula.
- A few smaller changes all over.

[airlied: merge conflict fix in i9xx_set_pipeconf]

* tag 'drm-intel-next-2013-09-21-merged' of git://people.freedesktop.org/~danvet/drm-intel: (119 commits)
  drm/i915: assume all GM45 Acer laptops use inverted backlight PWM
  drm/i915: cleanup a min_t() cast
  drm/i915: Pull intel_init_power_well() out of intel_modeset_init_hw()
  drm/i915: Add POWER_DOMAIN_VGA
  drm/i915: Refactor power well refcount inc/dec operations
  drm/i915: Add intel_display_power_{get, put} to request power for specific domains
  drm/i915: Change i915_request power well handling
  drm/i915: POSTING_READ IPS_CTL before waiting for the vblank
  drm/i915: don't disable ERR_INT on the IRQ handler
  drm/i915/vlv: disable rc6p and rc6pp residency reporting on BYT
  drm/i915/vlv: honor i915_enable_rc6 boot param on VLV
  drm/i915: s/HAS_L3_GPU_CACHE/HAS_L3_DPF
  drm/i915: Do remaps for all contexts
  drm/i915: Keep a list of all contexts
  drm/i915: Make l3 remapping use the ring
  drm/i915: Add second slice l3 remapping
  drm/i915: Fix HSW parity test
  drm/i915: dump crtc timings from the pipe config
  drm/i915: register backlight device also when backlight class is a module
  drm/i915: write D_COMP using the mailbox
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
parents 15c03dd4 b599c89e
......@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
};
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
......@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
{ DRM_MODE_ENCODER_DSI, "DSI" },
};
void drm_connector_ida_init(void)
......
......@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
intel_dsi.o \
intel_dsi_cmd.o \
intel_dsi_pll.o \
intel_bios.o \
intel_ddi.o \
intel_dp.o \
......
......@@ -76,17 +76,6 @@ struct intel_dvo_dev_ops {
int (*mode_valid)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode);
/*
* Callback to adjust the mode to be set in the CRTC.
*
* This allows an output to adjust the clock or even the entire set of
* timings, which is used for panels with fixed timings or for
* buses with clock limitations.
*/
bool (*mode_fixup)(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/*
* Callback for preparing mode changes on an output
*/
......
......@@ -145,6 +145,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", obj->ring->name);
}
static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
{
seq_putc(m, ctx->is_initialized ? 'I' : 'i');
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, ' ');
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
......@@ -1442,6 +1449,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct i915_hw_context *ctx;
int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
......@@ -1460,13 +1468,16 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
for_each_ring(ring, dev_priv, i) {
if (ring->default_context) {
seq_printf(m, "HW default context %s ring ", ring->name);
describe_obj(m, ring->default_context->obj);
list_for_each_entry(ctx, &dev_priv->context_list, link) {
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
for_each_ring(ring, dev_priv, i)
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ", ring->name);
describe_obj(m, ctx->obj);
seq_putc(m, '\n');
}
}
mutex_unlock(&dev->mode_config.mutex);
......@@ -1610,27 +1621,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
vlv_dpio_read(dev_priv, _DPIO_DIV_A));
vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
vlv_dpio_read(dev_priv, _DPIO_DIV_B));
vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
mutex_unlock(&dev_priv->dpio_lock);
......
......@@ -52,7 +52,7 @@
intel_ring_emit(LP_RING(dev_priv), x)
#define ADVANCE_LP_RING() \
intel_ring_advance(LP_RING(dev_priv))
__intel_ring_advance(LP_RING(dev_priv))
/**
* Lock test for when it's just for synchronization of ring access.
......@@ -1324,6 +1324,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
intel_init_power_well(dev);
intel_modeset_gem_init(dev);
/* Always safe in the mode setting case. */
......
......@@ -576,11 +576,20 @@ static void intel_resume_hotplug(struct drm_device *dev)
drm_helper_hpd_irq_event(dev);
}
static int __i915_drm_thaw(struct drm_device *dev)
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
intel_uncore_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
restore_gtt_mappings) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
i915_restore_state(dev);
intel_opregion_setup(dev);
......@@ -596,6 +605,8 @@ static int __i915_drm_thaw(struct drm_device *dev)
/* We need working interrupts for modeset enabling ... */
drm_irq_install(dev);
intel_init_power_well(dev);
intel_modeset_init_hw(dev);
drm_modeset_lock_all(dev);
......@@ -640,19 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
intel_uncore_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
__i915_drm_thaw(dev);
return error;
return __i915_drm_thaw(dev, true);
}
int i915_resume(struct drm_device *dev)
......@@ -668,20 +667,12 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
intel_uncore_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need this since the BIOS might clear all our scratch PTEs.
* earlier) need to restore the GTT mappings since the BIOS might clear
* all our scratch PTEs.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
!dev_priv->opregion.header) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
ret = __i915_drm_thaw(dev);
ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
if (ret)
return ret;
......@@ -719,10 +710,6 @@ int i915_reset(struct drm_device *dev)
simulated = dev_priv->gpu_error.stop_rings != 0;
if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
ret = -ENODEV;
} else {
ret = intel_gpu_reset(dev);
/* Also reset the gpu hangman. */
......@@ -734,9 +721,8 @@ int i915_reset(struct drm_device *dev)
"error for simulated gpu hangs\n");
ret = 0;
}
} else
dev_priv->gpu_error.last_reset = get_seconds();
}
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
......@@ -799,6 +785,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
DRM_INFO("This hardware requires preliminary hardware support.\n"
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
return -ENODEV;
}
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
......
......@@ -99,6 +99,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
POWER_DOMAIN_VGA,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
......@@ -225,6 +226,8 @@ struct intel_opregion {
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
......@@ -326,6 +329,8 @@ struct drm_i915_error_state {
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
int hangcheck_score[I915_NUM_RINGS];
enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
struct intel_crtc_config;
......@@ -357,7 +362,7 @@ struct drm_i915_display_funcs {
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
......@@ -367,7 +372,6 @@ struct drm_i915_display_funcs {
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
......@@ -420,6 +424,7 @@ struct intel_uncore {
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
func(is_preliminary) sep \
func(has_force_wake) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
......@@ -568,6 +573,13 @@ struct i915_vma {
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
};
struct i915_ctx_hang_stats {
......@@ -576,6 +588,12 @@ struct i915_ctx_hang_stats {
/* This context had batch active when hang was declared */
unsigned batch_active;
/* Time when this context was last blamed for a GPU reset */
unsigned long guilty_ts;
/* This context is banned to submit more work */
bool banned;
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
......@@ -584,10 +602,13 @@ struct i915_hw_context {
struct kref ref;
int id;
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct list_head link;
};
struct i915_fbc {
......@@ -900,9 +921,11 @@ struct i915_ums_state {
int mm_suspended;
};
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info;
u32 *remap_info[MAX_L3_SLICES];
struct work_struct error_work;
int which_slice;
};
struct i915_gem_mm {
......@@ -977,6 +1000,9 @@ struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
struct timer_list hangcheck_timer;
/* For reset and error_state handling. */
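With the default 1500 ms hangcheck period, DRM_I915_CTX_BAN_PERIOD works out to DIV_ROUND_UP(8 * 1500, 1000) = 12 seconds. Below is a small standalone model of how the hang_stats fields introduced above (guilty_ts, banned) can feed such a ban decision; it is an illustrative sketch, not the exact helper from this series:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define HANGCHECK_PERIOD_MS	1500
#define CTX_BAN_PERIOD_S	((8 * HANGCHECK_PERIOD_MS + 999) / 1000)	/* 12 s */

struct hang_stats {
	unsigned long guilty_ts;	/* when this context was last blamed */
	bool banned;			/* sticky once the ban triggers */
};

/* Ban a context that gets blamed for a second hang inside the window. */
bool ctx_is_banned(const struct hang_stats *hs)
{
	if (hs->banned)
		return true;
	return hs->guilty_ts &&
	       (unsigned long)time(NULL) - hs->guilty_ts <= CTX_BAN_PERIOD_S;
}

int main(void)
{
	struct hang_stats hs = { .guilty_ts = time(NULL), .banned = false };

	printf("banned: %d\n", ctx_is_banned(&hs));	/* 1: blamed just now */
	return 0;
}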
......@@ -985,8 +1011,6 @@ struct i915_gpu_error {
struct drm_i915_error_state *first_error;
struct work_struct work;
unsigned long last_reset;
/**
* State variable and reset counter controlling the reset flow
*
......@@ -1058,6 +1082,11 @@ struct intel_vbt_data {
int edp_bpp;
struct edp_power_seq edp_pps;
/* MIPI DSI */
struct {
u16 panel_id;
} dsi;
int crt_ddc_pin;
int child_dev_num;
......@@ -1318,6 +1347,7 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
struct list_head context_list;
u32 fdi_rx_config;
......@@ -1398,8 +1428,6 @@ struct drm_i915_gem_object {
struct list_head ring_list;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
* This is set if the object is on the active lists (has pending
......@@ -1485,13 +1513,6 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
......@@ -1600,6 +1621,9 @@ struct drm_i915_file_private {
((dev)->pci_device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
/*
* The genX designation typically refers to the render engine, so render
......@@ -1668,7 +1692,9 @@ struct drm_i915_file_private {
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
......@@ -1828,8 +1854,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
......@@ -1931,7 +1955,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
......@@ -2090,6 +2114,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
unsigned cache_level,
bool mappable,
bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
......@@ -2182,15 +2207,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
return 0;
}
#endif
/* intel_acpi.c */
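The SWSCI notification entry points compile to no-op stubs without ACPI, so future callers will not need their own #ifdefs. No caller is wired up yet in this series; a hypothetical call site could look like:

	/* Hypothetical encoder-enable hook; intel_opregion_notify_encoder()
	 * is the real entry point, the surrounding code is illustrative. */
	if (intel_opregion_notify_encoder(intel_encoder, true))
		DRM_DEBUG_KMS("SWSCI encoder notify failed\n");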
......@@ -2252,8 +2292,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
......
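All the VLV sideband accessors above follow the same shape; note in particular that the DPIO pair now takes the pipe explicitly to select the phy, matching the debugfs changes earlier in this diff. For example:

	/* Read-modify-write of a DPIO register on pipe A's phy. */
	u32 div = vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A);
	vlv_dpio_write(dev_priv, PIPE_A, _DPIO_DIV_A, div);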
......@@ -73,7 +73,7 @@
*
* There are two confusing terms used above:
* The "current context" means the context which is currently running on the
* GPU. The GPU has loaded it's state already and has stored away the gtt
* GPU. The GPU has loaded its state already and has stored away the gtt
* offset of the BO. The GPU is not actively referencing the data at this
* offset, but it will on the next context switch. The only way to avoid this
* is to do a GPU reset.
......@@ -129,6 +129,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
list_del(&ctx->link);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
}
......@@ -147,6 +148,7 @@ create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
INIT_LIST_HEAD(&ctx->link);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
......@@ -166,6 +168,7 @@ create_hw_context(struct drm_device *dev,
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
list_add_tail(&ctx->link, &dev_priv->context_list);
/* Default context will never have a file_priv */
if (file_priv == NULL)
......@@ -178,6 +181,10 @@ create_hw_context(struct drm_device *dev,
ctx->file_priv = file_priv;
ctx->id = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
return ctx;
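The remap_slice field is a plain dirty bitmask: context creation flags every populated slice, and the switch path in do_switch() below clears one bit per successful remap, leaving failed slices set for a retry on the next switch. A standalone toy model of that bookkeeping (not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int num_slices = 2;			/* e.g. HSW GT3 */
	unsigned int remap = (1u << num_slices) - 1;	/* 0x3: both dirty */
	unsigned int i;

	for (i = 0; i < num_slices; i++) {
		int failed = 0;	/* stand-in for i915_gem_l3_remap(ring, i) */

		if (!(remap & (1u << i)))
			continue;
		if (failed)
			continue;	/* bit stays set, retried next switch */
		remap &= ~(1u << i);
	}
	printf("pending remaps: %#x\n", remap);		/* 0 once all succeed */
	return 0;
}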
......@@ -393,11 +400,11 @@ static int do_switch(struct i915_hw_context *to)
struct intel_ring_buffer *ring = to->ring;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
int ret, i;
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
if (from == to)
if (from == to && !to->remap_slice)
return 0;
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
......@@ -420,8 +427,6 @@ static int do_switch(struct i915_hw_context *to)
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
else if (WARN_ON_ONCE(from == to)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
......@@ -429,6 +434,18 @@ static int do_switch(struct i915_hw_context *to)
return ret;
}
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
ret = i915_gem_l3_remap(ring, i);
/* If it failed, try again next round */
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
else
to->remap_slice &= ~(1<<i);
}
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. In fact, the below code
......@@ -451,17 +468,7 @@ static int do_switch(struct i915_hw_context *to)
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
ret = i915_add_request(ring, NULL);
if (ret) {
/* Too late, we've already scheduled a context switch.
* Try to undo the change so that the hw state is
* consistent with out tracking. In case of emergency,
* scream.
*/
WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
return ret;
}
/* obj is kept alive until the next request by its active ref */
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}
......
......@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
if (vma->obj->pin_count)
return false;
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
list_add(&vma->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
......@@ -113,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
}
/* We expect the caller to unpin, evict all and try again, or give up.
* So calling i915_gem_evict_everything() is unnecessary.
* So calling i915_gem_evict_vm() is unnecessary.
*/
return -ENOSPC;
......@@ -152,12 +155,46 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
return ret;
}
/**
* i915_gem_evict_vm - Try to free up VM space
*
* @vm: Address space to evict from
* @do_idle: Boolean directing whether to idle first.
*
* VM eviction is about freeing up virtual address space. If one wants fine
* grained eviction, i915_gem_evict_something() should be used instead. In terms
* of freeing up actual system memory, this function may not accomplish the
* desired result: an object may be shared across multiple address spaces, and
* this function does not guarantee that those objects will be freed.
*
* Using do_idle results in a more complete eviction because it retires and
* inactivates the currently active BOs.
*/
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
struct i915_vma *vma, *next;
int ret;
if (do_idle) {
ret = i915_gpu_idle(vm->dev);
if (ret)
return ret;
i915_gem_retire_requests(vm->dev);
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
if (vma->obj->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
return 0;
}
int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm;
struct i915_vma *vma, *next;
bool lists_empty = true;
int ret;
......@@ -184,11 +221,8 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
if (vma->obj->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
}
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
WARN_ON(i915_gem_evict_vm(vm, false));
return 0;
}
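A sketch of the intended calling pattern for the new helper, assuming the caller already holds struct_mutex (the call site here is hypothetical):

	/* Drain an entire address space; idle first so active objects reach
	 * the inactive list and can actually be unbound. Only the idle and
	 * retire step can fail, the unbinds themselves are WARN_ON'ed. */
	ret = i915_gem_evict_vm(vm, true);
	if (ret)
		return ret;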
......@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
vma = i915_gem_vma_create(obj, ggtt);
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
......
......@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
}
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
switch (a) {
case HANGCHECK_IDLE:
return "idle";
case HANGCHECK_WAIT:
return "wait";
case HANGCHECK_ACTIVE:
return "active";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
return "hung";
}
return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_state *error,
......@@ -255,6 +273,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(error->hangcheck_action[ring]),
error->hangcheck_score[ring]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
......@@ -720,6 +741,9 @@ static void i915_record_ring_state(struct drm_device *dev,
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
error->hangcheck_score[ring->id] = ring->hangcheck.score;
error->hangcheck_action[ring->id] = ring->hangcheck.action;
}
......
......@@ -665,7 +665,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
crtc);
}
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
static bool intel_hpd_irq_event(struct drm_device *dev,
struct drm_connector *connector)
{
enum drm_connector_status old_status;
......@@ -673,11 +674,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
old_status = connector->status;
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
if (old_status == connector->status)
return false;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
return (old_status != connector->status);
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
return true;
}
/*
......@@ -882,9 +888,10 @@ static void ivybridge_parity_work(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
char *parity_event[6];
uint32_t misccpctl;
unsigned long flags;
uint8_t slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
......@@ -892,55 +899,81 @@ static void ivybridge_parity_work(struct work_struct *work)
*/
mutex_lock(&dev_priv->dev->struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
if (WARN_ON(!dev_priv->l3_parity.which_slice))
goto out;
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
error_status = I915_READ(GEN7_L3CDERRST1);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
u32 reg;
I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
GEN7_L3CDERRST1_ENABLE);
POSTING_READ(GEN7_L3CDERRST1);
slice--;
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
break;
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
dev_priv->l3_parity.which_slice &= ~(1<<slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
reg = GEN7_L3CDERRST1 + (slice * 0x200);
mutex_unlock(&dev_priv->dev->struct_mutex);
error_status = I915_READ(reg);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
POSTING_READ(reg);
parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
parity_event[4] = NULL;
parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
parity_event[5] = NULL;
kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
KOBJ_CHANGE, parity_event);
DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
row, bank, subbank);
DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
slice, row, bank, subbank);
kfree(parity_event[4]);
kfree(parity_event[3]);
kfree(parity_event[2]);
kfree(parity_event[1]);
}
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (!HAS_L3_GPU_CACHE(dev))
if (!HAS_L3_DPF(dev))
return;
spin_lock(&dev_priv->irq_lock);
ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
iir &= GT_PARITY_ERROR(dev);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
dev_priv->l3_parity.which_slice |= 1 << 1;
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
dev_priv->l3_parity.which_slice |= 1 << 0;
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
......@@ -975,8 +1008,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
i915_handle_error(dev, false);
}
if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
ivybridge_parity_error_irq_handler(dev);
if (gt_iir & GT_PARITY_ERROR(dev))
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
#define HPD_STORM_DETECT_PERIOD 1000
......@@ -1388,7 +1421,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
bool err_int_reenable = false;
atomic_inc(&dev_priv->irq_received);
......@@ -1412,17 +1444,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER);
}
/* On Haswell, also mask ERR_INT because we don't want to risk
* generating "unclaimed register" interrupts from inside the interrupt
* handler. */
if (IS_HASWELL(dev)) {
spin_lock(&dev_priv->irq_lock);
err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
if (err_int_reenable)
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
if (INTEL_INFO(dev)->gen >= 6)
......@@ -1452,13 +1473,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
}
}
if (err_int_reenable) {
spin_lock(&dev_priv->irq_lock);
if (ivb_can_enable_err_int(dev))
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {
......@@ -2021,6 +2035,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
......@@ -2049,6 +2065,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
acthd);
switch (ring->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
......@@ -2064,6 +2081,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
}
} else {
ring->hangcheck.action = HANGCHECK_ACTIVE;
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
......@@ -2254,10 +2273,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
if (HAS_L3_GPU_CACHE(dev)) {
if (HAS_L3_DPF(dev)) {
/* L3 parity interrupt is always unmasked. */
dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
gt_irqs |= GT_PARITY_ERROR(dev);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
......
......@@ -340,7 +340,9 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, LBB,
&dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
......@@ -390,7 +392,9 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
if (INTEL_INFO(dev)->gen <= 4)
pci_write_config_byte(dev->pdev, LBB,
dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
......
......@@ -65,6 +65,8 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
if (IS_VALLEYVIEW(dminor->dev))
rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
......@@ -73,6 +75,8 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
if (IS_VALLEYVIEW(dminor->dev))
rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
......@@ -97,7 +101,7 @@ static struct attribute_group rc6_attr_group = {
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
if (!HAS_L3_GPU_CACHE(dev))
if (!HAS_L3_DPF(dev))
return -EPERM;
if (offset % 4 != 0)
......@@ -118,28 +122,31 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
uint32_t misccpctl;
int i, ret;
int slice = (int)(uintptr_t)attr->private;
int ret;
count = round_down(count, 4);
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
if (dev_priv->l3_parity.remap_info[slice])
memcpy(buf,
dev_priv->l3_parity.remap_info[slice] + (offset/4),
count);
else
memset(buf, 0, count);
mutex_unlock(&drm_dev->struct_mutex);
return i - offset;
return count;
}
static ssize_t
......@@ -151,18 +158,23 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
struct i915_hw_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
if (dev_priv->hw_contexts_disabled)
return -ENXIO;
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
if (!dev_priv->l3_parity.remap_info) {
if (!dev_priv->l3_parity.remap_info[slice]) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
......@@ -182,13 +194,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
dev_priv->l3_parity.remap_info = temp;
dev_priv->l3_parity.remap_info[slice] = temp;
memcpy(dev_priv->l3_parity.remap_info + (offset/4),
buf + (offset/4),
count);
memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
i915_gem_l3_remap(drm_dev);
/* NB: We defer the remapping until we switch to the context */
list_for_each_entry(ctx, &dev_priv->context_list, link)
ctx->remap_slice |= (1<<slice);
mutex_unlock(&drm_dev->struct_mutex);
......@@ -200,7 +212,17 @@ static struct bin_attribute dpf_attrs = {
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
.mmap = NULL
.mmap = NULL,
.private = (void *)0
};
static struct bin_attribute dpf_attrs_1 = {
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
.mmap = NULL,
.private = (void *)1
};
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
......@@ -507,10 +529,17 @@ void i915_setup_sysfs(struct drm_device *dev)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
#endif
if (HAS_L3_GPU_CACHE(dev)) {
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
if (NUM_L3_SLICES(dev) > 1) {
ret = device_create_bin_file(&dev->primary->kdev,
&dpf_attrs_1);
if (ret)
DRM_ERROR("l3 parity slice 1 setup failed\n");
}
}
ret = 0;
......@@ -534,6 +563,7 @@ void i915_teardown_sysfs(struct drm_device *dev)
sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
else
sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
......
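From userspace the two slices now show up as separate bin files in the card's sysfs device directory. A small sketch that dumps slice 0's cached remap table; the path assumes card0, and reading simply returns the driver's remap_info cache (zeroes when no remap has been written):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t row[4];
	off_t off = 0;
	ssize_t n;
	int fd = open("/sys/class/drm/card0/l3_parity", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = pread(fd, row, sizeof(row), off)) >= (ssize_t)sizeof(row)) {
		printf("%04lx: %08x %08x %08x %08x\n", (long)off,
		       row[0], row[1], row[2], row[3]);
		off += n;
	}
	close(fd);
	return 0;
}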
......@@ -568,6 +568,21 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
}
static void
parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_mipi *mipi;
mipi = find_section(bdb, BDB_MIPI);
if (!mipi) {
DRM_DEBUG_KMS("No MIPI BDB found");
return;
}
/* XXX: add more info */
dev_priv->vbt.dsi.panel_id = mipi->panel_id;
}
static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
......@@ -745,6 +760,7 @@ intel_parse_bios(struct drm_device *dev)
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
if (bios)
pci_unmap_rom(pdev, bios);
......
......@@ -104,6 +104,7 @@ struct vbios_data {
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
#define BDB_MIPI 50
#define BDB_SKIP 254 /* VBIOS private block, ignore */
struct bdb_general_features {
......@@ -618,4 +619,44 @@ int intel_parse_bios(struct drm_device *dev);
#define PORT_IDPC 8
#define PORT_IDPD 9
/* MIPI DSI panel info */
struct bdb_mipi {
u16 panel_id;
u16 bridge_revision;
/* General params */
u32 dithering:1;
u32 bpp_pixel_format:1;
u32 rsvd1:1;
u32 dphy_valid:1;
u32 resvd2:28;
u16 port_info;
u16 rsvd3:2;
u16 num_lanes:2;
u16 rsvd4:12;
/* DSI config */
u16 virt_ch_num:2;
u16 vtm:2;
u16 rsvd5:12;
u32 dsi_clock;
u32 bridge_ref_clk;
u16 rsvd_pwr;
/* Dphy Params */
u32 prepare_cnt:5;
u32 rsvd6:3;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
u32 rsvd7:3;
u32 exit_zero_cnt:6;
u32 rsvd8:2;
u32 hl_switch_cnt;
u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
} __attribute__((packed));
#endif /* _I830_BIOS_H_ */
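Since the VBT is a packed byte layout read straight out of the BIOS image, any drift in the bitfield widths above silently corrupts every following field. A cheap guard one could add, e.g. at the top of parse_mipi() in intel_bios.c; the 40-byte figure is my manual count of the fields above, so treat it as an assumption:

	/* Hypothetical size guard for struct bdb_mipi. */
	BUILD_BUG_ON(sizeof(struct bdb_mipi) != 40);	/* 40 = manual count */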
......@@ -89,6 +89,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
int dotclock;
tmp = I915_READ(crt->adpa_reg);
......@@ -103,6 +104,13 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
pipe_config->adjusted_mode.clock = dotclock;
}
/* Note: The caller is required to filter out dpms modes not supported by the
......@@ -349,9 +357,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
/* FIXME: debug force function and remove */
ret = true;
return ret;
}
......
......@@ -58,7 +58,7 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
......@@ -767,9 +767,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
......@@ -1268,6 +1268,37 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
case TRANS_DDI_BPC_8:
pipe_config->pipe_bpp = 24;
break;
case TRANS_DDI_BPC_10:
pipe_config->pipe_bpp = 30;
break;
case TRANS_DDI_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->has_dp_encoder = true;
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
break;
}
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
......
......@@ -93,13 +93,17 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_UNKNOWN 9
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
#define INTEL_DSI_COMMAND_MODE 0
#define INTEL_DSI_VIDEO_MODE 1
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
......@@ -207,8 +211,21 @@ struct intel_crtc_config {
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
/* User requested mode, only valid as a starting point to
* compute adjusted_mode, except in the case of (S)DVO where
* it's also for the output timings of the (S)DVO chip.
* adjusted_mode will then correspond to the S(DVO) chip's
* preferred input timings. */
struct drm_display_mode requested_mode;
/* Actual pipe timings ie. what we program into the pipe timing
* registers. adjusted_mode.clock is the pipe pixel clock. */
struct drm_display_mode adjusted_mode;
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
int pipe_src_w, pipe_src_h;
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
......@@ -262,7 +279,8 @@ struct intel_crtc_config {
/*
* Frequence the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode.
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
int port_clock;
......@@ -288,6 +306,8 @@ struct intel_crtc_config {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
bool double_wide;
};
struct intel_crtc {
......@@ -522,6 +542,7 @@ extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
extern bool intel_dsi_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
......@@ -708,9 +729,10 @@ extern void intel_write_eld(struct drm_encoder *encoder,
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
extern enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_watermarks(struct drm_crtc *crtc);
extern void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
......@@ -741,8 +763,13 @@ extern void i915_remove_power_well(struct drm_device *dev);
extern bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_display_power_get(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_display_power_put(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_resume_power_well(struct drm_device *dev);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void ironlake_teardown_rc6(struct drm_device *dev);
......@@ -793,6 +820,14 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
extern void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
extern int intel_dotclock_calculate(int link_freq,
const struct intel_link_m_n *m_n);
extern void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
int dotclock);
extern bool intel_crtc_active(struct drm_crtc *crtc);
extern void i915_disable_vga_mem(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
......@@ -153,6 +153,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
pipe_config->adjusted_mode.clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder)
......@@ -267,11 +269,6 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
if (intel_dvo->dev.dev_ops->mode_fixup)
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
&pipe_config->requested_mode,
adjusted_mode);
return true;
}
......
......@@ -92,6 +92,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp, flags = 0;
int dotclock;
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
......@@ -116,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
pipe_config->adjusted_mode.clock = dotclock;
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
......
......@@ -821,14 +821,11 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
struct intel_crtc *crtc)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
if (!crtc->active)
return -EINVAL;
/* can't use the overlay with double wide pipe */
if (INTEL_INFO(overlay->dev)->gen < 4 &&
(I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
if (crtc->config.double_wide)
return -EINVAL;
return 0;
......