Commit f60de976 authored by Dave Airlie

Merge tag 'drm-intel-next-2015-07-17' of git://anongit.freedesktop.org/drm-intel into drm-next

- preliminary hw support flag dropped for skl after Damien fixed an ABI issue around
  planes
- legacy modesetting is done using atomic infrastructure now (Maarten)!
- more gen9 workarounds (Arun&Nick)
- MOCS programming (cache control for better performance) for skl/bxt
- vlv/chv dpll improvements (Ville)
- PSR fixes from Rodrigo
- fbc improvements from Paulo
- plumb requests into execlist submit functions (Mika)
- opregion code cleanup from Jani
- resource streamer support from Abdiel for mesa
- final fixes for 12bpc hdmi + enabling support from Ville
drm-intel-next-2015-07-03:
- dsi improvements (Gaurav)
- bxt ddi dpll hw state readout (Imre)
- chv dvfs support and overall wm improvements for both vlv and chv (Ville)
- ppgtt polish from Mika and Michel
- cdclk support for bxt (Bob Paauwe)
- make frontbuffer tracking more precise
- OLR removal (John Harrison)
- per-ctx WA batch buffer support (Arun Siluvery)
- remove KMS Kconfig option (Chris)
- more hpd handling refactoring from Jani
- use atomic states throughout modeset code and integrate with atomic plane
  update (Maarten)
drm-intel-next-2015-06-19:
- refactoring hpd irq handlers (Jani)
- polish skl dpll code a bit (Damien)
- dynamic cdclk adjustment (Ville & Mika)
- fix up 12bpc hdmi and enable it for real again (Ville)
- extend hsw cmd parser to be useful for atomic configuration (Francisco Jerez)
- even more atomic conversion and rolling state handling out across modeset code
  from Maarten & Ander
- fix DRRS idleness detection (Ramalingam)
- clean up dsp address alignment handling (Ville)
- some fbc cleanup patches from Paulo
- prevent hard-hangs when trying to reset the gpu on skl (Mika)

* tag 'drm-intel-next-2015-07-17' of git://anongit.freedesktop.org/drm-intel: (386 commits)
  drm/i915: Update DRIVER_DATE to 20150717
  drm/i915/skl: Drop the preliminary_hw_support flag
  drm/i915/skl: Don't expose the top most plane on gen9 display
  drm/i915: Fix divide by zero on watermark update
  drm/i915: Invert fastboot check
  drm/i915: Clarify logic for initial modeset
  drm/i915: Unconditionally check gmch pfit state
  drm/i915: always disable irqs in intel_pipe_update_start
  drm/i915: Remove use of runtime pm in atomic commit functions
  drm/i915: Call plane update functions directly from intel_atomic_commit.
  drm/i915: Use full atomic modeset.
  drm/i915/gen9: Add WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken
  drm/i915/gen9: Add WaFlushCoherentL3CacheLinesAtContextSwitch workaround
  drm/i915/gen9: Add WaDisableCtxRestoreArbitration workaround
  drm/i915: Enable WA batch buffers for Gen9
  drm/i915/gen9: Implement WaDisableKillLogic for gen 9
  drm/i915: Use explicitly fixed type in compat32 structs
  drm/i915: Fix noatomic crtc disabling, v2.
  drm/i915: fill in more mode members
  drm/i915: Added BXT check in HAS_CORE_RING_FREQ macro
  ...
parents fa78ceab e0548f19
......@@ -4012,7 +4012,6 @@ int num_ioctls;</synopsis>
<title>Frontbuffer Tracking</title>
!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
!Idrivers/gpu/drm/i915/intel_frontbuffer.c
!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
</sect2>
<sect2>
......@@ -4044,6 +4043,11 @@ int num_ioctls;</synopsis>
probing, so those sections fully apply.
</para>
</sect2>
<sect2>
<title>Hotplug</title>
!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
!Idrivers/gpu/drm/i915/intel_hotplug.c
</sect2>
<sect2>
<title>High Definition Audio</title>
!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
......
......@@ -207,7 +207,6 @@ CONFIG_AGP_AMD64=y
CONFIG_AGP_INTEL=y
CONFIG_DRM=y
CONFIG_DRM_I915=y
CONFIG_DRM_I915_KMS=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
......
......@@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, unsigned long *mappable_end)
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, u64 *mappable_end)
{
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*stolen_size = intel_private.stolen_size;
......
......@@ -4471,9 +4471,7 @@ static int drm_property_replace_global_blob(struct drm_device *dev,
goto err_created;
}
if (old_blob)
drm_property_unreference_blob(old_blob);
*replace = new_blob;
return 0;
......
......@@ -36,15 +36,6 @@ config DRM_I915
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
depends on DRM_I915
default y
help
Choose this option if you want kernel modesetting enabled by default.
If in doubt, say "Y".
config DRM_I915_FBDEV
bool "Enable legacy fbdev support for the modesetting intel driver"
depends on DRM_I915
......
......@@ -34,7 +34,9 @@ i915-y += i915_cmd_parser.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
intel_hotplug.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
intel_uncore.o
......
......@@ -22,6 +22,7 @@
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Thomas Richter <thor@math.tu-berlin.de>
*
* Minor modifications (Dithering enable):
* Thomas Richter <thor@math.tu-berlin.de>
......@@ -90,7 +91,7 @@
/*
* LCD Vertical Display Size
*/
#define VR21 0x20
#define VR21 0x21
/*
* Panel power down status
......@@ -155,16 +156,33 @@
# define VR8F_POWER_MASK (0x3c)
# define VR8F_POWER_POS (2)
/* Some BIOS implementations do not restore the DVO state upon
* resume from standby. Thus, this driver has to handle it
* instead. The following list contains all registers that
* require saving.
*/
static const uint16_t backup_addresses[] = {
0x11, 0x12,
0x18, 0x19, 0x1a, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x8e, 0x8f,
0x10 /* this must come last */
};
struct ivch_priv {
bool quiet;
uint16_t width, height;
/* Register backup */
uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
};
static void ivch_dump_regs(struct intel_dvo_device *dvo);
/**
* Reads a register on the ivch.
*
......@@ -246,6 +264,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
{
struct ivch_priv *priv;
uint16_t temp;
int i;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
if (priv == NULL)
......@@ -273,6 +292,14 @@ static bool ivch_init(struct intel_dvo_device *dvo,
ivch_read(dvo, VR20, &priv->width);
ivch_read(dvo, VR21, &priv->height);
/* Make a backup of the registers to be able to restore them
* upon suspend.
*/
for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
ivch_dump_regs(dvo);
return true;
out:
......@@ -294,12 +321,31 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
return MODE_OK;
}
/* Restore the DVO registers after a resume
* from RAM. Registers have been saved during
* the initialization.
*/
static void ivch_reset(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
int i;
DRM_DEBUG_KMS("Resetting the IVCH registers\n");
ivch_write(dvo, VR10, 0x0000);
for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
}
/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
ivch_reset(dvo);
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return;
......@@ -308,6 +354,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
if (enable)
......@@ -334,6 +381,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
uint16_t vr01;
ivch_reset(dvo);
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return false;
......@@ -348,11 +397,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct ivch_priv *priv = dvo->dev_priv;
uint16_t vr40 = 0;
uint16_t vr01 = 0;
uint16_t vr10;
ivch_read(dvo, VR10, &vr10);
ivch_reset(dvo);
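	/* backup_addresses[] ends with 0x10 (VR10, "this must come last"),
	 * so the final reg_backup slot holds the saved VR10 value; use the
	 * cached copy rather than re-reading the register that ivch_reset()
	 * has just rewritten. */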
vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
/* Enable dithering for 18 bpp pipelines */
vr10 &= VR10_INTERFACE_DEPTH_MASK;
if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
......@@ -366,7 +419,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
uint16_t x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
vr40 |= VR40_CLOCK_GATING_ENABLE;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
......@@ -381,8 +434,6 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
ivch_write(dvo, VR01, vr01);
ivch_write(dvo, VR40, vr40);
ivch_dump_regs(dvo);
}
static void ivch_dump_regs(struct intel_dvo_device *dvo)
......
......@@ -131,7 +131,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
CMD( MI_LOAD_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
......@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
}
......@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* allowed mask/value pair given in the whitelist entry.
*/
if (reg->mask) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
reg_addr);
return false;
......
......@@ -163,6 +163,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
if (!value)
return -ENODEV;
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915.enable_hangcheck &&
intel_has_gpu_reset(dev);
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev);
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
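
Userspace reaches the two new parameters through the long-standing GETPARAM ioctl; a minimal sketch, assuming libdrm's drmIoctl() and an illustrative /dev/dri/card0 node:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR); /* illustrative node path */
	int has_rs = 0, has_reset = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_RESOURCE_STREAMER,
		.value = &has_rs,
	};

	if (fd < 0)
		return 1;
	/* Older kernels reject unknown params with -EINVAL, so treat any
	 * failure as "not supported". */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		has_rs = 0;
	gp.param = I915_PARAM_HAS_GPU_RESET;
	gp.value = &has_reset;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		has_reset = 0;
	printf("resource streamer: %d, gpu reset: %d\n", has_rs, has_reset);
	close(fd);
	return 0;
}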
......@@ -719,11 +726,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info = (struct intel_device_info *)&dev_priv->info;
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
* one of those, not both. Until we can safely expose the topmost plane
* as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
* we don't expose the topmost plane at all to prevent ABI breakage
* down the line.
*/
if (IS_BROXTON(dev)) {
info->num_sprites[PIPE_A] = 3;
info->num_sprites[PIPE_B] = 3;
info->num_sprites[PIPE_C] = 2;
} else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
} else if (IS_VALLEYVIEW(dev))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
......@@ -933,8 +948,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
if (dev_priv->dp_wq == NULL) {
dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
if (dev_priv->hotplug.dp_wq == NULL) {
DRM_ERROR("Failed to create our dp workqueue.\n");
ret = -ENOMEM;
goto out_freewq;
......@@ -1029,7 +1044,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->hotplug.dp_wq);
out_freewq:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
......@@ -1116,6 +1131,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev);
intel_csr_ucode_fini(dev);
......@@ -1123,7 +1139,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
......
......@@ -356,7 +356,6 @@ static const struct intel_device_info intel_cherryview_info = {
};
static const struct intel_device_info intel_skylake_info = {
.is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
......@@ -369,7 +368,6 @@ static const struct intel_device_info intel_skylake_info = {
};
static const struct intel_device_info intel_skylake_gt3_info = {
.is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
......@@ -440,9 +438,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
{0, 0, 0}
};
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
void intel_detect_pch(struct drm_device *dev)
{
......@@ -541,21 +537,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return true;
}
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->long_hpd_port_mask = 0;
dev_priv->short_hpd_port_mask = 0;
dev_priv->hpd_event_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->dig_port_work);
cancel_work_sync(&dev_priv->hotplug_work);
cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
}
void i915_firmware_load_error_print(const char *fw_path, int err)
{
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
......@@ -601,7 +582,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
pci_power_t opregion_target_state;
int error;
......@@ -632,8 +612,7 @@ static int i915_drm_suspend(struct drm_device *dev)
* for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
for_each_crtc(dev, crtc)
intel_crtc_control(crtc, false);
intel_display_suspend(dev);
drm_modeset_unlock_all(dev);
intel_dp_mst_suspend(dev);
......@@ -760,7 +739,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
intel_display_resume(dev);
drm_modeset_unlock_all(dev);
intel_dp_mst_resume(dev);
......@@ -865,9 +844,6 @@ int i915_reset(struct drm_device *dev)
bool simulated;
int ret;
if (!i915.reset)
return 0;
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
......@@ -1727,21 +1703,15 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module pararmeter.
*
* Otherwise, just follow the parameter (defaulting to off).
*
* Allow optional vga_text_mode_force boot option to override
* the default behavior.
* Enable KMS by default, unless explicitly overridden by
* either the i915.modeset parameter or by the
* vga_text_mode_force boot option.
*/
#if defined(CONFIG_DRM_I915_KMS)
if (i915.modeset != 0)
driver.driver_features |= DRIVER_MODESET;
#endif
if (i915.modeset == 1)
driver.driver_features |= DRIVER_MODESET;
if (i915.modeset == 0)
driver.driver_features &= ~DRIVER_MODESET;
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && i915.modeset == -1)
driver.driver_features &= ~DRIVER_MODESET;
......@@ -1759,7 +1729,7 @@ static int __init i915_init(void)
* to the atomic ioctl and the atomic properties. Only plane operations on
* a single CRTC will actually work.
*/
if (i915.nuclear_pageflip)
if (driver.driver_features & DRIVER_MODESET)
driver.driver_features |= DRIVER_ATOMIC;
return drm_pci_init(&driver, &i915_pci_driver);
......
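
With DRIVER_ATOMIC now implied by modeset instead of gated on nuclear_pageflip, userspace can exercise the atomic ioctl directly. A minimal sketch of a test-only plane commit via libdrm (fd, plane_id, fb_prop_id and fb_id are placeholders that would come from drmModeGetPlaneResources()/drmModeObjectGetProperties()):

	drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	/* single-plane update only: per the comment above, only plane
	 * operations on a single CRTC actually work at this point */
	drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id);
	int ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
	drmModeAtomicFree(req);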
......@@ -407,30 +407,21 @@ void i915_gem_context_fini(struct drm_device *dev)
i915_gem_context_unreference(dctx);
}
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring;
int ret, i;
BUG_ON(!dev_priv->ring[RCS].default_context);
struct intel_engine_cs *ring = req->ring;
int ret;
if (i915.enable_execlists) {
for_each_ring(ring, dev_priv, i) {
if (ring->init_context) {
ret = ring->init_context(ring,
ring->default_context);
if (ret) {
DRM_ERROR("ring init context: %d\n",
ret);
return ret;
}
}
}
if (ring->init_context == NULL)
return 0;
ret = ring->init_context(req);
} else
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, ring->default_context);
if (ret)
ret = i915_switch_context(req);
if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
return ret;
}
......@@ -485,10 +476,9 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
}
static inline int
mi_set_context(struct intel_engine_cs *ring,
struct intel_context *new_context,
u32 hw_flags)
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct intel_engine_cs *ring = req->ring;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
......@@ -503,13 +493,15 @@ mi_set_context(struct intel_engine_cs *ring,
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev)) {
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
else if (INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
......@@ -517,7 +509,7 @@ mi_set_context(struct intel_engine_cs *ring,
if (INTEL_INFO(ring->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
ret = intel_ring_begin(ring, len);
ret = intel_ring_begin(req, len);
if (ret)
return ret;
......@@ -540,7 +532,7 @@ mi_set_context(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
......@@ -621,9 +613,10 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
return false;
}
static int do_switch(struct intel_engine_cs *ring,
struct intel_context *to)
static int do_switch(struct drm_i915_gem_request *req)
{
struct intel_context *to = req->ctx;
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
......@@ -659,7 +652,7 @@ static int do_switch(struct intel_engine_cs *ring,
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
......@@ -701,7 +694,7 @@ static int do_switch(struct intel_engine_cs *ring,
WARN_ON(needs_pd_load_pre(ring, to) &&
needs_pd_load_post(ring, to, hw_flags));
ret = mi_set_context(ring, to, hw_flags);
ret = mi_set_context(req, hw_flags);
if (ret)
goto unpin_out;
......@@ -710,7 +703,7 @@ static int do_switch(struct intel_engine_cs *ring,
*/
if (needs_pd_load_post(ring, to, hw_flags)) {
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
......@@ -726,7 +719,7 @@ static int do_switch(struct intel_engine_cs *ring,
if (!(to->remap_slice & (1<<i)))
continue;
ret = i915_gem_l3_remap(ring, i);
ret = i915_gem_l3_remap(req, i);
/* If it failed, try again next round */
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
......@@ -742,7 +735,7 @@ static int do_switch(struct intel_engine_cs *ring,
*/
if (from != NULL) {
from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
......@@ -766,7 +759,7 @@ static int do_switch(struct intel_engine_cs *ring,
if (uninitialized) {
if (ring->init_context) {
ret = ring->init_context(ring, to);
ret = ring->init_context(req);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
......@@ -782,8 +775,7 @@ static int do_switch(struct intel_engine_cs *ring,
/**
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
* @to: the context to switch to
* @req: request for which we'll execute the context switch
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 and create and destroy. If the context is in use by the GPU,
......@@ -794,25 +786,25 @@ static int do_switch(struct intel_engine_cs *ring,
* switched by writing to the ELSP and requests keep a reference to their
* context.
*/
int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *to)
int i915_switch_context(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (to != ring->last_context) {
i915_gem_context_reference(to);
if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (req->ctx != ring->last_context) {
i915_gem_context_reference(req->ctx);
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
ring->last_context = to;
ring->last_context = req->ctx;
}
return 0;
}
return do_switch(ring, to);
return do_switch(req);
}
static bool contexts_enabled(struct drm_device *dev)
......@@ -898,6 +890,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_BAN_PERIOD:
args->value = ctx->hang_stats.ban_period_seconds;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
break;
default:
ret = -EINVAL;
break;
......@@ -935,6 +930,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
ctx->hang_stats.ban_period_seconds = args->value;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size) {
ret = -EINVAL;
} else {
ctx->flags &= ~CONTEXT_NO_ZEROMAP;
ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
}
break;
default:
ret = -EINVAL;
break;
......
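
NO_ZEROMAP is toggled per context from userspace with the CONTEXT_SETPARAM ioctl; a minimal sketch, assuming ctx_id came from an earlier DRM_IOCTL_I915_GEM_CONTEXT_CREATE:

	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size = 0,	/* must be 0, see the -EINVAL check above */
		.param = I915_CONTEXT_PARAM_NO_ZEROMAP,
		.value = 1,	/* keep this context's mappings away from offset 0 */
	};
	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);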
......@@ -126,6 +126,8 @@ struct intel_rotation_info {
unsigned int pitch;
uint32_t pixel_format;
uint64_t fb_modifier;
unsigned int width_pages, height_pages;
uint64_t size;
};
struct i915_ggtt_view {
......@@ -205,19 +207,34 @@ struct i915_vma {
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};
struct i915_page_table {
struct i915_page_dma {
struct page *page;
union {
dma_addr_t daddr;
/* For gen6/gen7 only. This is the offset in the GGTT
* where the page directory entries for PPGTT begin
*/
uint32_t ggtt_offset;
};
};
#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)
struct i915_page_scratch {
struct i915_page_dma base;
};
struct i915_page_table {
struct i915_page_dma base;
unsigned long *used_ptes;
};
struct i915_page_directory {
struct page *page; /* NULL for GEN6-GEN7 */
union {
uint32_t pd_offset;
dma_addr_t daddr;
};
struct i915_page_dma base;
unsigned long *used_pdes;
struct i915_page_table *page_table[I915_PDES]; /* PDEs */
......@@ -233,13 +250,12 @@ struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
unsigned long start; /* Start offset always 0 for dri2 */
size_t total; /* size addr space maps (ex. 2GB for ggtt) */
u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
struct {
dma_addr_t addr;
struct page *page;
} scratch;
struct i915_page_scratch *scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
/**
* List of objects currently involved in rendering.
......@@ -300,9 +316,9 @@ struct i915_address_space {
*/
struct i915_gtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
unsigned long mappable_end; /* End offset that we can CPU map */
size_t stolen_size; /* Total size of stolen memory */
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
......@@ -314,9 +330,9 @@ struct i915_gtt {
int mtrr;
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
u64 *mappable_end);
};
struct i915_hw_ppgtt {
......@@ -329,16 +345,13 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd;
};
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
struct drm_i915_file_private *file_priv;
gen6_pte_t __iomem *pd_addr;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring);
struct drm_i915_gem_request *req);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
......@@ -468,6 +481,14 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
return test_bit(n, ppgtt->pdp.used_pdpes) ?
px_dma(ppgtt->pdp.page_directory[n]) :
px_dma(ppgtt->base.scratch_pd);
}
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
......@@ -475,6 +496,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
......
......@@ -152,29 +152,26 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
return 0;
}
int i915_gem_render_state_init(struct intel_engine_cs *ring)
int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
struct render_state so;
int ret;
ret = i915_gem_render_state_prepare(ring, &so);
ret = i915_gem_render_state_prepare(req->ring, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
ret = ring->dispatch_execbuffer(ring,
so.ggtt_offset,
ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
ret = __i915_add_request(ring, NULL, so.obj);
/* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
return ret;
......
......@@ -39,7 +39,7 @@ struct render_state {
int gen;
};
int i915_gem_render_state_init(struct intel_engine_cs *ring);
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so);
......
......@@ -42,6 +42,31 @@
* for is a boon.
*/
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
DRM_MM_SEARCH_DEFAULT);
mutex_unlock(&dev_priv->mm.stolen_lock);
return ret;
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node)
{
mutex_lock(&dev_priv->mm.stolen_lock);
drm_mm_remove_node(node);
mutex_unlock(&dev_priv->mm.stolen_lock);
}
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -151,134 +176,6 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base;
}
static int find_compression_threshold(struct drm_device *dev,
struct drm_mm_node *node,
int size,
int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int compression_threshold = 1;
int ret;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, therefore, in that case, we only have 1 resort.
*/
/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret == 0)
return compression_threshold;
again:
/* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT);
if (ret && INTEL_INFO(dev)->gen <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
}
}
static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
dev_priv->fbc.threshold = ret;
if (INTEL_INFO(dev_priv)->gen >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
4096, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
goto err_fb;
dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
dev_priv->fbc.uncompressed_size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
return 0;
err_fb:
kfree(compressed_llb);
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
if (size <= dev_priv->fbc.uncompressed_size)
return 0;
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
return i915_setup_compression(dev, size, fb_cpp);
}
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->fbc.uncompressed_size == 0)
return;
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}
dev_priv->fbc.uncompressed_size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -286,7 +183,6 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
i915_gem_stolen_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
}
......@@ -296,6 +192,8 @@ int i915_gem_init_stolen(struct drm_device *dev)
u32 tmp;
int bios_reserved = 0;
mutex_init(&dev_priv->mm.stolen_lock);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
......@@ -386,8 +284,10 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
if (obj->stolen) {
drm_mm_remove_node(obj->stolen);
i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
......@@ -448,8 +348,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!stolen)
return NULL;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
4096, DRM_MM_SEARCH_DEFAULT);
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
kfree(stolen);
return NULL;
......@@ -459,7 +358,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (obj)
return obj;
drm_mm_remove_node(stolen);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}
......@@ -494,7 +393,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
stolen->start = stolen_offset;
stolen->size = size;
mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
mutex_unlock(&dev_priv->mm.stolen_lock);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);
......@@ -504,7 +405,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
drm_mm_remove_node(stolen);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}
......@@ -545,7 +446,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
err_vma:
i915_gem_vma_destroy(vma);
err_out:
drm_mm_remove_node(stolen);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
drm_gem_object_unreference(&obj->base);
return NULL;
......
......@@ -35,107 +35,20 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* mulitpass with multiple cliprects? */
u32 cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer32_t;
static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_batchbuffer32_t batchbuffer32;
drm_i915_batchbuffer_t __user *batchbuffer;
if (copy_from_user
(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
return -EFAULT;
batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
|| __put_user(batchbuffer32.start, &batchbuffer->start)
|| __put_user(batchbuffer32.used, &batchbuffer->used)
|| __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
|| __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
|| __put_user(batchbuffer32.num_cliprects,
&batchbuffer->num_cliprects)
|| __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
&batchbuffer->cliprects))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long)batchbuffer);
}
typedef struct _drm_i915_cmdbuffer32 {
u32 buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* mulitpass with multiple cliprects? */
u32 cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer32_t;
static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_cmdbuffer32_t cmdbuffer32;
drm_i915_cmdbuffer_t __user *cmdbuffer;
if (copy_from_user
(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
return -EFAULT;
cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
|| __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
&cmdbuffer->buf)
|| __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
|| __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
|| __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
|| __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
|| __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
&cmdbuffer->cliprects))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
(unsigned long)cmdbuffer);
}
typedef struct drm_i915_irq_emit32 {
u32 irq_seq;
} drm_i915_irq_emit32_t;
static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_irq_emit32_t req32;
drm_i915_irq_emit_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user((int __user *)(unsigned long)req32.irq_seq,
&request->irq_seq))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
(unsigned long)request);
}
typedef struct drm_i915_getparam32 {
int param;
struct drm_i915_getparam32 {
s32 param;
/*
* We screwed up the generic ioctl struct here and used a variable-sized
* pointer. Use u32 in the compat struct to match the 32bit pointer
* userspace expects.
*/
u32 value;
} drm_i915_getparam32_t;
};
static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_getparam32_t req32;
struct drm_i915_getparam32 req32;
drm_i915_getparam_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
......@@ -152,41 +65,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
(unsigned long)request);
}
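
The comment above is about pointer width; a short worked layout of why the shim is needed, assuming an x86-64 kernel with ia32 userspace (illustrative sizes):

/* native struct drm_i915_getparam:  s32 param (4) + padding (4)
 *                                   + 'int *value' (8)  = 16 bytes
 * 32-bit userspace view:            s32 param (4) + u32 value (4) = 8 bytes
 * Hence the explicit u32 in drm_i915_getparam32 to match what 32-bit
 * userspace actually passes. */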
typedef struct drm_i915_mem_alloc32 {
int region;
int alignment;
int size;
u32 region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc32_t;
static int compat_i915_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_mem_alloc32_t req32;
drm_i915_mem_alloc_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.region, &request->region)
|| __put_user(req32.alignment, &request->alignment)
|| __put_user(req32.size, &request->size)
|| __put_user((void __user *)(unsigned long)req32.region_offset,
&request->region_offset))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
(unsigned long)request);
}
static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
[DRM_I915_ALLOC] = compat_i915_alloc
};
/**
......
......@@ -28,7 +28,6 @@ struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
......@@ -52,13 +51,12 @@ struct i915_params i915 __read_mostly = {
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
......@@ -84,11 +82,6 @@ MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
......@@ -104,7 +97,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
module_param_named(reset, i915.reset, bool, 0600);
module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
......@@ -182,10 +175,6 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; only planes work for now (default: false).");
/* WA to get away with the default setting in VBT for early platforms.Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,
......
......@@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
}
/* only restore FBC info on the platform that supports FBC*/
intel_fbc_disable(dev);
intel_fbc_disable(dev_priv);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
......
......@@ -64,23 +64,15 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
goto out;
}
units = 0;
div = 1000000ULL;
if (IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
/* Special case for 320Mhz */
if (czcount_30ns == 1) {
div = 10000000ULL;
units = 3125ULL;
} else {
/* chv counts are one less */
czcount_30ns += 1;
div = 1000000ULL;
units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
}
}
if (units == 0)
units = DIV_ROUND_UP_ULL(30ULL * bias,
(u64)czcount_30ns);
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
......
......@@ -424,10 +424,10 @@ TRACE_EVENT(i915_gem_evict_vm,
);
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_engine_cs *from,
struct intel_engine_cs *to,
TP_PROTO(struct drm_i915_gem_request *to_req,
struct intel_engine_cs *from,
struct drm_i915_gem_request *req),
TP_ARGS(from, to, req),
TP_ARGS(to_req, from, req),
TP_STRUCT__entry(
__field(u32, dev)
......@@ -439,7 +439,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to->id;
__entry->sync_to = to_req->ring->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
......@@ -475,8 +475,8 @@ TRACE_EVENT(i915_gem_ring_dispatch,
);
TRACE_EVENT(i915_gem_ring_flush,
TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
TP_ARGS(ring, invalidate, flush),
TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
TP_ARGS(req, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
......@@ -486,8 +486,8 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->dev = req->ring->dev->primary->index;
__entry->ring = req->ring->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
......
......@@ -41,7 +41,8 @@
*
* The disable sequences must be performed before disabling the transcoder or
* port. The enable sequences may only be performed after enabling the
* transcoder and port, and after completed link training.
* transcoder and port, and after completed link training. Therefore the audio
* enable/disable sequences are part of the modeset sequence.
*
* The codec and controller sequences could be done either parallel or serial,
* but generally the ELDV/PD change in the codec sequence indicates to the audio
......
......@@ -3,8 +3,8 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, unsigned long *mappable_end);
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, u64 *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
......
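
Callers of intel_gtt_get() must adopt the widened types; a minimal sketch of a call site under the new prototype:

	u64 gtt_total, mappable_end;
	size_t stolen_size;
	phys_addr_t mappable_base;

	intel_gtt_get(&gtt_total, &stolen_size, &mappable_base, &mappable_end);
	/* gtt_total and mappable_end are now 64-bit even on 32-bit builds,
	 * so large GGTT address spaces no longer truncate. */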