Commit 52b820d9 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull intel DRM fixes from Dave Airlie:
 "Just intel fixes, including getting the Ironlake systems back to the
  state they were in for 3.6."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/i915: Revert shrinker changes from "Track unbound pages"
  drm/i915: Use pixel size for computing linear offsets into a sprite
  drm/i915: Add DEBUG messages to all intel_create_user_framebuffer error paths
  drm/i915: The sprite scaler on Ironlake also support YUV planes
  drm: Only evict the blocks required to create the requested hole
  drm/i915: Treat crtc->mode.clock == 0 as disabled
  Revert "drm/i915: no lvds quirk for Zotac ZDBOX SD ID12/ID13"
  drm/i915; Only increment the user-pin-count after successfully pinning the bo
parents 47ecfcb7 82ba789f
@@ -221,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
         BUG_ON(!hole_node->hole_follows || node->allocated);
-        if (mm->color_adjust)
-                mm->color_adjust(hole_node, color, &adj_start, &adj_end);
         if (adj_start < start)
                 adj_start = start;
+        if (adj_end > end)
+                adj_end = end;
+        if (mm->color_adjust)
+                mm->color_adjust(hole_node, color, &adj_start, &adj_end);
         if (alignment) {
                 unsigned tmp = adj_start % alignment;
@@ -506,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
         mm->scan_size = size;
         mm->scanned_blocks = 0;
         mm->scan_hit_start = 0;
-        mm->scan_hit_size = 0;
+        mm->scan_hit_end = 0;
         mm->scan_check_range = 0;
         mm->prev_scanned_node = NULL;
 }
@@ -533,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
         mm->scan_size = size;
         mm->scanned_blocks = 0;
         mm->scan_hit_start = 0;
-        mm->scan_hit_size = 0;
+        mm->scan_hit_end = 0;
         mm->scan_start = start;
         mm->scan_end = end;
         mm->scan_check_range = 1;
@@ -552,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
         struct drm_mm *mm = node->mm;
         struct drm_mm_node *prev_node;
         unsigned long hole_start, hole_end;
-        unsigned long adj_start;
-        unsigned long adj_end;
+        unsigned long adj_start, adj_end;
         mm->scanned_blocks++;
@@ -570,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
         node->node_list.next = &mm->prev_scanned_node->node_list;
         mm->prev_scanned_node = node;
-        hole_start = drm_mm_hole_node_start(prev_node);
-        hole_end = drm_mm_hole_node_end(prev_node);
-        adj_start = hole_start;
-        adj_end = hole_end;
-        if (mm->color_adjust)
-                mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+        adj_end = hole_end = drm_mm_hole_node_end(prev_node);
         if (mm->scan_check_range) {
                 if (adj_start < mm->scan_start)
@@ -586,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
                         adj_end = mm->scan_end;
         }
+        if (mm->color_adjust)
+                mm->color_adjust(prev_node, mm->scan_color,
+                                 &adj_start, &adj_end);
         if (check_free_hole(adj_start, adj_end,
                             mm->scan_size, mm->scan_alignment)) {
                 mm->scan_hit_start = hole_start;
-                mm->scan_hit_size = hole_end;
+                mm->scan_hit_end = hole_end;
                 return 1;
         }
@@ -626,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
                                node_list);
         prev_node->hole_follows = node->scanned_preceeds_hole;
-        INIT_LIST_HEAD(&node->node_list);
         list_add(&node->node_list, &prev_node->node_list);
-        /* Only need to check for containement because start&size for the
-         * complete resulting free block (not just the desired part) is
-         * stored. */
-        if (node->start >= mm->scan_hit_start &&
-            node->start + node->size
-                        <= mm->scan_hit_start + mm->scan_hit_size) {
-                return 1;
-        }
-        return 0;
+        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+                node->start < mm->scan_hit_end);
 }
 EXPORT_SYMBOL(drm_mm_scan_remove_block);
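Aside: the rewritten return in drm_mm_scan_remove_block above is just a half-open interval-overlap test between the scanned node's extent and the recorded hit window [scan_hit_start, scan_hit_end). A minimal standalone sketch of that test follows; the helper name and the numbers are illustrative only, not kernel API.

#include <assert.h>
#include <stdbool.h>

/* Illustrative only: the same half-open overlap test that the new
 * return statement in drm_mm_scan_remove_block() performs, with the
 * node's extent and the recorded scan hit passed in explicitly. */
static bool node_in_scan_hit(unsigned long node_start, unsigned long node_end,
                             unsigned long hit_start, unsigned long hit_end)
{
        return node_end > hit_start && node_start < hit_end;
}

int main(void)
{
        /* Hit window [100, 200): any node overlapping it is reported. */
        assert(node_in_scan_hit(150, 180, 100, 200));   /* fully inside    */
        assert(node_in_scan_hit(90, 110, 100, 200));    /* straddles start */
        assert(!node_in_scan_hit(200, 220, 100, 200));  /* starts at end   */
        return 0;
}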
...
@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 }
 static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+                  bool purgeable_only)
 {
         struct drm_i915_gem_object *obj, *next;
         long count = 0;
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
         list_for_each_entry_safe(obj, next,
                                  &dev_priv->mm.unbound_list,
                                  gtt_list) {
-                if (i915_gem_object_is_purgeable(obj) &&
+                if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                     i915_gem_object_put_pages(obj) == 0) {
                         count += obj->base.size >> PAGE_SHIFT;
                         if (count >= target)
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
         list_for_each_entry_safe(obj, next,
                                  &dev_priv->mm.inactive_list,
                                  mm_list) {
-                if (i915_gem_object_is_purgeable(obj) &&
+                if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                     i915_gem_object_unbind(obj) == 0 &&
                     i915_gem_object_put_pages(obj) == 0) {
                         count += obj->base.size >> PAGE_SHIFT;
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
         return count;
 }
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+        return __i915_gem_shrink(dev_priv, target, true);
+}
 static void
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
@@ -3522,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                 goto out;
         }
-        obj->user_pin_count++;
-        obj->pin_filp = file;
-        if (obj->user_pin_count == 1) {
+        if (obj->user_pin_count == 0) {
                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
                 if (ret)
                         goto out;
         }
+        obj->user_pin_count++;
+        obj->pin_filp = file;
         /* XXX - flush the CPU caches for pinned objects
          * as the X server doesn't manage domains yet
          */
@@ -4394,6 +4402,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
         if (nr_to_scan) {
                 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+                if (nr_to_scan > 0)
+                        nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+                                                        false);
                 if (nr_to_scan > 0)
                         i915_gem_shrink_all(dev_priv);
         }
@@ -4402,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
         list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
                 if (obj->pages_pin_count == 0)
                         cnt += obj->base.size >> PAGE_SHIFT;
-        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+        list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                         cnt += obj->base.size >> PAGE_SHIFT;
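For context on the shrinker hunks above: i915_gem_purge() becomes a thin wrapper around __i915_gem_shrink(..., true), and the shrinker then falls back to a second pass that is not restricted to purgeable objects before resorting to i915_gem_shrink_all(). A toy, userspace-only sketch of that two-pass control flow follows; none of the names, structures, or numbers below are i915 API, they only mirror the purgeable_only idea.

#include <stdbool.h>
#include <stdio.h>

struct toy_obj {
        long pages;
        bool purgeable;
        bool freed;
};

/* Toy model of the __i915_gem_shrink() idea: free objects until 'target'
 * pages are reclaimed; with purgeable_only set, skip objects the owner
 * still cares about. */
static long toy_shrink(struct toy_obj *objs, int n, long target,
                       bool purgeable_only)
{
        long count = 0;

        for (int i = 0; i < n && count < target; i++) {
                if (objs[i].freed)
                        continue;
                if (purgeable_only && !objs[i].purgeable)
                        continue;
                objs[i].freed = true;
                count += objs[i].pages;
        }
        return count;
}

int main(void)
{
        struct toy_obj objs[] = {
                { 16, true,  false },
                { 64, false, false },
                { 32, true,  false },
        };
        long target = 100, freed;

        /* Pass 1: purgeable only; pass 2: everything else, mirroring
         * i915_gem_purge() followed by __i915_gem_shrink(..., false). */
        freed = toy_shrink(objs, 3, target, true);
        if (freed < target)
                freed += toy_shrink(objs, 3, target - freed, false);

        printf("reclaimed %ld pages\n", freed);
        return 0;
}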
...
@@ -8598,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
 {
         int ret;
-        if (obj->tiling_mode == I915_TILING_Y)
+        if (obj->tiling_mode == I915_TILING_Y) {
+                DRM_DEBUG("hardware does not support tiling Y\n");
                 return -EINVAL;
+        }
-        if (mode_cmd->pitches[0] & 63)
+        if (mode_cmd->pitches[0] & 63) {
+                DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+                          mode_cmd->pitches[0]);
                 return -EINVAL;
+        }
         /* FIXME <= Gen4 stride limits are bit unclear */
-        if (mode_cmd->pitches[0] > 32768)
+        if (mode_cmd->pitches[0] > 32768) {
+                DRM_DEBUG("pitch (%d) must be at less than 32768\n",
+                          mode_cmd->pitches[0]);
                 return -EINVAL;
+        }
         if (obj->tiling_mode != I915_TILING_NONE &&
-            mode_cmd->pitches[0] != obj->stride)
+            mode_cmd->pitches[0] != obj->stride) {
+                DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+                          mode_cmd->pitches[0], obj->stride);
                 return -EINVAL;
+        }
         /* Reject formats not supported by any plane early. */
         switch (mode_cmd->pixel_format) {
@@ -8621,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
                 break;
         case DRM_FORMAT_XRGB1555:
         case DRM_FORMAT_ARGB1555:
-                if (INTEL_INFO(dev)->gen > 3)
+                if (INTEL_INFO(dev)->gen > 3) {
+                        DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                         return -EINVAL;
+                }
                 break;
         case DRM_FORMAT_XBGR8888:
         case DRM_FORMAT_ABGR8888:
@@ -8630,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
         case DRM_FORMAT_ARGB2101010:
         case DRM_FORMAT_XBGR2101010:
         case DRM_FORMAT_ABGR2101010:
-                if (INTEL_INFO(dev)->gen < 4)
+                if (INTEL_INFO(dev)->gen < 4) {
+                        DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                         return -EINVAL;
+                }
                 break;
         case DRM_FORMAT_YUYV:
         case DRM_FORMAT_UYVY:
         case DRM_FORMAT_YVYU:
         case DRM_FORMAT_VYUY:
-                if (INTEL_INFO(dev)->gen < 6)
+                if (INTEL_INFO(dev)->gen < 5) {
+                        DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                         return -EINVAL;
+                }
                 break;
         default:
-                DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+                DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
                 return -EINVAL;
         }
...
@@ -774,14 +774,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
                         DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
                 },
         },
-        {
-                .callback = intel_no_lvds_dmi_callback,
-                .ident = "ZOTAC ZBOXSD-ID12/ID13",
-                .matches = {
-                        DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
-                        DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
-                },
-        },
         {
                 .callback = intel_no_lvds_dmi_callback,
                 .ident = "Gigabyte GA-D525TUD",
...
@@ -44,6 +44,14 @@
  * i915.i915_enable_fbc parameter
  */
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+        /* Be paranoid as we can arrive here with only partial
+         * state retrieved from the hardware during setup.
+         */
+        return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
          * - going to an unsupported config (interlace, pixel multiply, etc.)
          */
         list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-                if (to_intel_crtc(tmp_crtc)->active &&
-                    !to_intel_crtc(tmp_crtc)->primary_disabled &&
-                    tmp_crtc->fb) {
+                if (intel_crtc_active(tmp_crtc) &&
+                    !to_intel_crtc(tmp_crtc)->primary_disabled) {
                         if (crtc) {
                                 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
         struct drm_crtc *crtc, *enabled = NULL;
         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                if (to_intel_crtc(crtc)->active && crtc->fb) {
+                if (intel_crtc_active(crtc)) {
                         if (enabled)
                                 return NULL;
                         enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
         int entries, tlb_miss;
         crtc = intel_get_crtc_for_plane(dev, plane);
-        if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+        if (!intel_crtc_active(crtc)) {
                 *cursor_wm = cursor->guard_size;
                 *plane_wm = display->guard_size;
                 return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
         int entries;
         crtc = intel_get_crtc_for_plane(dev, plane);
-        if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
+        if (!intel_crtc_active(crtc))
                 return false;
         clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1476,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
         crtc = intel_get_crtc_for_plane(dev, 0);
-        if (to_intel_crtc(crtc)->active && crtc->fb) {
+        if (intel_crtc_active(crtc)) {
                 int cpp = crtc->fb->bits_per_pixel / 8;
                 if (IS_GEN2(dev))
                         cpp = 4;
@@ -1490,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
         crtc = intel_get_crtc_for_plane(dev, 1);
-        if (to_intel_crtc(crtc)->active && crtc->fb) {
+        if (intel_crtc_active(crtc)) {
                 int cpp = crtc->fb->bits_per_pixel / 8;
                 if (IS_GEN2(dev))
                         cpp = 4;
@@ -2044,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
         int entries, tlb_miss;
         crtc = intel_get_crtc_for_plane(dev, plane);
-        if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+        if (!intel_crtc_active(crtc)) {
                 *sprite_wm = display->guard_size;
                 return false;
         }
...
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
         I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
         I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-        linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+        linear_offset = y * fb->pitches[0] + x * pixel_size;
         sprsurf_offset =
                 intel_gen4_compute_offset_xtiled(&x, &y,
-                                                 fb->bits_per_pixel / 8,
-                                                 fb->pitches[0]);
+                                                 pixel_size, fb->pitches[0]);
         linear_offset -= sprsurf_offset;
         /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
         I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
         I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-        linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+        linear_offset = y * fb->pitches[0] + x * pixel_size;
         dvssurf_offset =
                 intel_gen4_compute_offset_xtiled(&x, &y,
-                                                 fb->bits_per_pixel / 8,
-                                                 fb->pitches[0]);
+                                                 pixel_size, fb->pitches[0]);
         linear_offset -= dvssurf_offset;
         if (obj->tiling_mode != I915_TILING_NONE)
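The sprite hunks above replace fb->bits_per_pixel / 8 with the plane's pixel_size when computing the byte offset of pixel (x, y), i.e. offset = y * pitch + x * cpp. A minimal standalone illustration of that arithmetic follows; the names and numbers are illustrative, not the i915 ones.

#include <stdio.h>

/* Byte offset of pixel (x, y) in a linearly laid-out buffer with the
 * given stride (pitch, in bytes) and bytes per pixel (cpp). */
static unsigned long linear_offset(unsigned int x, unsigned int y,
                                   unsigned int pitch, unsigned int cpp)
{
        return (unsigned long)y * pitch + (unsigned long)x * cpp;
}

int main(void)
{
        /* e.g. a 1920-pixel-wide XRGB8888 surface: pitch = 1920 * 4 bytes.
         * For a packed YUV sprite the cpp differs, which is why the fix
         * uses the plane's pixel size rather than fb->bits_per_pixel / 8. */
        printf("%lu\n", linear_offset(10, 2, 1920 * 4, 4)); /* prints 15400 */
        return 0;
}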
...
@@ -70,7 +70,7 @@ struct drm_mm {
         unsigned long scan_color;
         unsigned long scan_size;
         unsigned long scan_hit_start;
-        unsigned scan_hit_size;
+        unsigned long scan_hit_end;
         unsigned scanned_blocks;
         unsigned long scan_start;
         unsigned long scan_end;
...