Commit e76e9aeb authored by Ben Widawsky, committed by Daniel Vetter

drm/i915: Stop using AGP layer for GEN6+

As a quick hack we make the old intel_gtt structure mutable so we can
fool a bunch of the existing code which depends on elements in that data
structure. We can/should try to remove this in a subsequent patch.

This should preserve the old gtt init behavior which upon writing these
patches seems incorrect. The next patch will fix these things.

The one exception is VLV which doesn't have the preserved flush control
write behavior. Since we want to do that for all GEN6+ stuff, we'll
handle that in a later patch. Mainstream VLV support doesn't actually
exist yet anyway.

v2: Update the comment to remove the "voodoo"
Check that the last pte written matches what we readback

v3: actually kill cache_level_to_agp_type since most of the flags will
disappear in an upcoming patch

v4: v3 was actually not what we wanted (Daniel)
Make the ggtt bind assertions better and stricter (Chris)
Fix some uncaught errors at gtt init (Chris)
Some other random stuff that Chris wanted

v5: check for i==0 in gen6_ggtt_bind_object to shut up gcc (Ben)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by [v4]: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Make the cache_level -> agp_flags conversion for pre-gen6 a
tad more robust by mapping everything != CACHE_NONE to the cached agp
flag - we have a 1:1 uncached mapping, but different modes of
cacheable (at least on later generations). Suggested by Chris Wilson.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent b3fcabb1
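
As an aside for readers of the [danvet] bracket note above: a minimal stand-alone sketch of the pre-gen6 conversion it describes. The enum values and AGP flag constants below are stand-ins for the real kernel definitions, and the helper name is hypothetical; the actual change open-codes the same two-way choice inside i915_gem_gtt_bind_object() in the i915_gem_gtt.c hunk further down.

#include <stdio.h>

/* Stand-in definitions for illustration only; the real cache levels live in
 * the i915 headers and the AGP flag values in the intel-agp headers. */
enum i915_cache_level { I915_CACHE_NONE, I915_CACHE_LLC, I915_CACHE_LLC_MLC };
enum { AGP_USER_MEMORY = 0, AGP_USER_CACHED_MEMORY = 1 };

/* Hypothetical helper mirroring the note: only CACHE_NONE maps to the
 * uncached AGP flag; every cacheable level maps to the single cached flag. */
static unsigned int cache_level_to_agp_flags(enum i915_cache_level level)
{
	return (level == I915_CACHE_NONE) ? AGP_USER_MEMORY
					  : AGP_USER_CACHED_MEMORY;
}

int main(void)
{
	printf("NONE=%u LLC=%u LLC_MLC=%u\n",
	       cache_level_to_agp_flags(I915_CACHE_NONE),
	       cache_level_to_agp_flags(I915_CACHE_LLC),
	       cache_level_to_agp_flags(I915_CACHE_LLC_MLC));
	return 0;
}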
@@ -1686,7 +1686,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
 {
 	return &intel_private.base;
 }
...
@@ -1496,19 +1496,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}
 
-	ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
-	if (!ret) {
-		DRM_ERROR("failed to set up gmch\n");
-		ret = -EIO;
+	ret = i915_gem_gtt_init(dev);
+	if (ret)
 		goto put_bridge;
-	}
-
-	dev_priv->mm.gtt = intel_gtt_get();
-	if (!dev_priv->mm.gtt) {
-		DRM_ERROR("Failed to initialize GTT\n");
-		ret = -ENODEV;
-		goto put_gmch;
-	}
 
 	i915_kick_out_firmware_fb(dev_priv);
@@ -1683,7 +1673,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-	intel_gmch_remove();
+	i915_gem_gtt_fini(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
...
@@ -746,7 +746,7 @@ typedef struct drm_i915_private {
 	struct {
 		/** Bridge to intel-gtt-ko */
-		const struct intel_gtt *gtt;
+		struct intel_gtt *gtt;
 		/** Memory allocator for GTT stolen memory */
 		struct drm_mm stolen;
 		/** Memory allocator for GTT */
@@ -1538,6 +1538,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 				    unsigned long start,
 				    unsigned long mappable_end,
 				    unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+extern inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
...
@@ -845,12 +845,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		 * domain anymore. */
 		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			i915_gem_clflush_object(obj);
-			intel_gtt_chipset_flush();
+			i915_gem_chipset_flush(dev);
 		}
 	}
 
 	if (needs_clflush_after)
-		intel_gtt_chipset_flush();
+		i915_gem_chipset_flush(dev);
 
 	return ret;
 }
@@ -3058,7 +3058,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 		return;
 
 	i915_gem_clflush_object(obj);
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(obj->base.dev);
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
@@ -3959,7 +3959,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	if (!intel_enable_gtt())
+	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
 
 	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4294,7 +4294,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 			page_cache_release(page);
 		}
 	}
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 	obj->phys_obj->cur_obj = NULL;
 	obj->phys_obj = NULL;
@@ -4381,7 +4381,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
 		return -EFAULT;
 	}
 
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 	return 0;
 }
...
@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	}
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		intel_gtt_chipset_flush();
+		i915_gem_chipset_flush(ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
...
@@ -262,26 +262,6 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			       obj->base.size >> PAGE_SHIFT);
 }
 
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
-					    enum i915_cache_level cache_level)
-{
-	switch (cache_level) {
-	case I915_CACHE_LLC_MLC:
-		/* Older chipsets do not have this extra level of CPU
-		 * cacheing, so fallthrough and request the PTE simply
-		 * as cached.
-		 */
-		if (INTEL_INFO(dev)->gen >= 6 && !IS_HASWELL(dev))
-			return AGP_USER_CACHED_MEMORY_LLC_MLC;
-	case I915_CACHE_LLC:
-		return AGP_USER_CACHED_MEMORY;
-	default:
-	case I915_CACHE_NONE:
-		return AGP_USER_MEMORY;
-	}
-}
-
 static bool do_idling(struct drm_i915_private *dev_priv)
 {
 	bool ret = dev_priv->mm.interruptible;
@@ -304,13 +284,38 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 	dev_priv->mm.interruptible = interruptible;
 }
 
+static void i915_ggtt_clear_range(struct drm_device *dev,
+				  unsigned first_entry,
+				  unsigned num_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	gtt_pte_t scratch_pte;
+	volatile void __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+
+	if (INTEL_INFO(dev)->gen < 6) {
+		intel_gtt_clear_range(first_entry, num_entries);
+		return;
+	}
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+	memset_io(gtt_base, scratch_pte, num_entries * sizeof(scratch_pte));
+	readl(gtt_base);
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* First fill our portion of the GTT with scratch pages */
-	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+	i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -318,7 +323,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 }
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -334,21 +339,69 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+				  enum i915_cache_level level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st = obj->pages;
+	struct scatterlist *sg = st->sgl;
+	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+	int unused, i = 0;
+	unsigned int len, m = 0;
+	dma_addr_t addr;
+
+	for_each_sg(st->sgl, sg, st->nents, unused) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (m = 0; m < len; m++) {
+			addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			gtt_entries[i] = pte_encode(dev, addr, level);
+			i++;
+		}
+	}
+
+	BUG_ON(i > max_entries);
+	BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+	/* XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that even though
+	 * registers and PTEs are within the same BAR that they are potentially
+	 * of NUMA access patterns. Therefore, even with the way we assume
+	 * hardware should work, we must keep this posting read for paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+}
+
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
-
-	intel_gtt_insert_sg_entries(obj->pages,
-				    obj->gtt_space->start >> PAGE_SHIFT,
-				    agp_type);
+	if (INTEL_INFO(dev)->gen < 6) {
+		unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+			AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+		intel_gtt_insert_sg_entries(obj->pages,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    flags);
+	} else {
+		gen6_ggtt_bind_object(obj, cache_level);
+	}
 
 	obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+	i915_ggtt_clear_range(obj->base.dev,
+			      obj->gtt_space->start >> PAGE_SHIFT,
 			      obj->base.size >> PAGE_SHIFT);
 
 	obj->has_global_gtt_mapping = 0;
@@ -406,5 +459,153 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
 	/* ... but ensure that we clear the entire range. */
-	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	get_page(page);
+	set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+				PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, dma_addr))
+		return -EINVAL;
+#else
+	dma_addr = page_to_phys(page);
+#endif
+	dev_priv->mm.gtt->scratch_page = page;
+	dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+	return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
+	pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	put_page(dev_priv->mm.gtt->scratch_page);
+	__free_page(dev_priv->mm.gtt->scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+	return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+	return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	phys_addr_t gtt_bus_addr;
+	u16 snb_gmch_ctl;
+	u32 tmp;
+	int ret;
+
+	/* On modern platforms we need not worry ourself with the legacy
+	 * hostbridge query stuff. Skip it entirely
+	 */
+	if (INTEL_INFO(dev)->gen < 6) {
+		ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+		if (!ret) {
+			DRM_ERROR("failed to set up gmch\n");
+			return -EIO;
+		}
+
+		dev_priv->mm.gtt = intel_gtt_get();
+		if (!dev_priv->mm.gtt) {
+			DRM_ERROR("Failed to initialize GTT\n");
+			intel_gmch_remove();
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+	if (!dev_priv->mm.gtt)
+		return -ENOMEM;
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+
+	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp);
+	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+	gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20);
+
+	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp);
+	dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK;
+
+	/* i9xx_setup */
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	dev_priv->mm.gtt->gtt_total_entries =
+		gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+	dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+
+	dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+	/* 64/512MB is the current min/max we actually know of, but this is just a
+	 * coarse sanity check.
+	 */
+	if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+	    dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+		DRM_ERROR("Unknown GMADR entries (%d)\n",
+			  dev_priv->mm.gtt->gtt_mappable_entries);
+		ret = -ENXIO;
+		goto err_out;
+	}
+
+	ret = setup_scratch_page(dev);
+	if (ret) {
+		DRM_ERROR("Scratch setup failed\n");
+		goto err_out;
+	}
+
+	dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr,
+					dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+	if (!dev_priv->mm.gtt->gtt) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		teardown_scratch_page(dev);
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+	DRM_INFO("Memory Usable by graphics device = %dK\n", dev_priv->mm.gtt->gtt_total_entries >> 10);
+	DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+	DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+	return 0;
+
+err_out:
+	kfree(dev_priv->mm.gtt);
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gmch_remove();
+	return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	iounmap(dev_priv->mm.gtt->gtt);
+	teardown_scratch_page(dev);
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gmch_remove();
+	kfree(dev_priv->mm.gtt);
+}
...
@@ -41,6 +41,12 @@
  */
 #define INTEL_GMCH_CTRL		0x52
 #define INTEL_GMCH_VGA_DISABLE	(1 << 1)
+#define SNB_GMCH_CTRL		0x50
+#define    SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
+#define    SNB_GMCH_GGMS_MASK	0x3
+#define    SNB_GMCH_GMS_SHIFT	3 /* Graphics Mode Select */
+#define    SNB_GMCH_GMS_MASK	0x1f
+
 
 /* PCI config space */
...
@@ -3,7 +3,7 @@
 #ifndef _DRM_INTEL_GTT_H
 #define _DRM_INTEL_GTT_H
 
-const struct intel_gtt {
+struct intel_gtt {
 	/* Size of memory reserved for graphics by the BIOS */
 	unsigned int stolen_size;
 	/* Total number of gtt entries. */
@@ -17,6 +17,7 @@ const struct intel_gtt {
 	unsigned int do_idle_maps : 1;
 	/* Share the scratch page dma with ppgtts. */
 	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
 	/* for ppgtt PDE access */
 	u32 __iomem *gtt;
 	/* needed for ioremap in drm/i915 */
...
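
As a closing aside: the SNB_GMCH_CTRL decode added by this patch is easy to check by hand. The sketch below reuses the shift/mask constants from the register-header hunk and the same arithmetic as gen6_get_total_gtt_size()/gen6_get_stolen_size() in the i915_gem_gtt.c hunk; the example register value is made up for illustration (the driver reads the real one from PCI config space at offset 0x50).

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the register-header hunk above. */
#define SNB_GMCH_GGMS_SHIFT	8	/* GTT Graphics Memory Size */
#define SNB_GMCH_GGMS_MASK	0x3
#define SNB_GMCH_GMS_SHIFT	3	/* Graphics Mode Select */
#define SNB_GMCH_GMS_MASK	0x1f

/* Same arithmetic as gen6_get_total_gtt_size(): GGMS encodes the GTT size in
 * MB units. */
static unsigned int total_gtt_size(uint16_t ctl)
{
	return ((ctl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK) << 20;
}

/* Same arithmetic as gen6_get_stolen_size(): GMS encodes stolen memory in
 * 32 MB units. */
static unsigned int stolen_size(uint16_t ctl)
{
	return ((ctl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK) << 25;
}

int main(void)
{
	/* Hypothetical control word: GGMS = 2, GMS = 8. */
	uint16_t ctl = (2 << SNB_GMCH_GGMS_SHIFT) | (8 << SNB_GMCH_GMS_SHIFT);

	/* 2 MB of GTT / 4-byte PTEs = 512K entries; GMS of 8 = 256 MB stolen. */
	printf("GTT size = %u MB (%u PTEs)\n",
	       total_gtt_size(ctl) >> 20, total_gtt_size(ctl) / 4);
	printf("stolen   = %u MB\n", stolen_size(ctl) >> 20);
	return 0;
}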