Commit 3a93dc42 authored by Linus Torvalds

Merge branch 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/sis: add pciid for SiS 662/671 chipset
  drm: add new rv380 pciid
  drm: add support for passing state into the suspend hooks.
  drm/i915: Fix hibernate save/restore of VGA attribute regs
  drm/i915 more registers for S3 (DSPCLK_GATE_D, CACHE_MODE_0, MI_ARB_STATE)
  drm/i915: restore pipeconf regs unconditionally
  drm/i915: save/restore interrupt state
  drm: convert drm from nopage to fault.
  i915: wrap chipset types requiring hw status set ioctl
  drm/radeon: add initial rs690 support to drm.
parents cf8c0d1d feac7af5
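
The suspend-related patches in this merge change the driver-facing hook from int (*suspend)(struct drm_device *) to int (*suspend)(struct drm_device *, pm_message_t), so drm_sysfs_suspend() can forward the PM event and a driver can tell a real suspend apart from the pre-hibernation freeze. A minimal sketch of a driver hook written against the new signature follows; the example_* names are hypothetical and not part of this merge, while the signature and PM_EVENT handling mirror the drmP.h, drm_sysfs.c and i915_drv.c hunks below.

#include "drmP.h"		/* struct drm_device, struct drm_driver */

/* Sketch only: example_* identifiers are hypothetical. */
static int example_suspend(struct drm_device *dev, pm_message_t state)
{
	/* Freezing before a hibernation image is restored: keep the device up. */
	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	/* ... save whatever hardware state must survive suspend ... */

	/* Only power the device down for a real suspend, as i915_suspend() does. */
	if (state.event == PM_EVENT_SUSPEND) {
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	return 0;
}

static int example_resume(struct drm_device *dev)
{
	/* ... restore the saved state ... */
	return 0;
}

/* Wired up through a (partial) driver descriptor; all other hooks omitted. */
static struct drm_driver example_driver = {
	.suspend = example_suspend,
	.resume = example_resume,
};
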
@@ -568,7 +568,7 @@ struct drm_driver {
 	void (*postclose) (struct drm_device *, struct drm_file *);
 	void (*lastclose) (struct drm_device *);
 	int (*unload) (struct drm_device *);
-	int (*suspend) (struct drm_device *);
+	int (*suspend) (struct drm_device *, pm_message_t state);
 	int (*resume) (struct drm_device *);
 	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
 	void (*dma_ready) (struct drm_device *);
...
@@ -83,6 +83,7 @@
 	{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+	{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
@@ -236,6 +237,7 @@
 	{0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
 	{0, 0, 0}
 #define r128_PCI_IDS \
@@ -313,6 +315,7 @@
 	{0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+	{0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
 	{0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
...
@@ -36,7 +36,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
 	printk(KERN_ERR "%s\n", __FUNCTION__);
 	if (drm_dev->driver->suspend)
-		return drm_dev->driver->suspend(drm_dev);
+		return drm_dev->driver->suspend(drm_dev, state);
 	return 0;
 }
...
@@ -66,7 +66,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 }
 /**
- * \c nopage method for AGP virtual memory.
+ * \c fault method for AGP virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -76,8 +76,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  */
 #if __OS_HAS_AGP
-static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
-						unsigned long address)
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->head->dev;
@@ -89,19 +88,24 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 	 * Find the right map
 	 */
 	if (!drm_core_has_AGP(dev))
-		goto vm_nopage_error;
+		goto vm_fault_error;
 	if (!dev->agp || !dev->agp->cant_use_aperture)
-		goto vm_nopage_error;
+		goto vm_fault_error;
 	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
-		goto vm_nopage_error;
+		goto vm_fault_error;
 	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
 	map = r_list->map;
 	if (map && map->type == _DRM_AGP) {
-		unsigned long offset = address - vma->vm_start;
+		/*
+		 * Using vm_pgoff as a selector forces us to use this unusual
+		 * addressing scheme.
+		 */
+		unsigned long offset = (unsigned long)vmf->virtual_address -
+			vma->vm_start;
 		unsigned long baddr = map->offset + offset;
 		struct drm_agp_mem *agpmem;
 		struct page *page;
@@ -123,7 +127,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		}
 		if (!agpmem)
-			goto vm_nopage_error;
+			goto vm_fault_error;
 		/*
 		 * Get the page, inc the use count, and return it
@@ -131,22 +135,21 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
 		page = virt_to_page(__va(agpmem->memory->memory[offset]));
 		get_page(page);
+		vmf->page = page;
 		DRM_DEBUG
 		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
 		     baddr, __va(agpmem->memory->memory[offset]), offset,
 		     page_count(page));
-		return page;
+		return 0;
 	}
-vm_nopage_error:
-	return NOPAGE_SIGBUS;	/* Disallow mremap */
+vm_fault_error:
+	return VM_FAULT_SIGBUS;	/* Disallow mremap */
 }
 #else				/* __OS_HAS_AGP */
-static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
-						unsigned long address)
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	return NOPAGE_SIGBUS;
+	return VM_FAULT_SIGBUS;
 }
 #endif				/* __OS_HAS_AGP */
@@ -160,28 +163,26 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
  * Get the mapping, find the real physical page to map, get the page, and
  * return it.
  */
-static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
-						    unsigned long address)
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
 	unsigned long offset;
 	unsigned long i;
 	struct page *page;
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!map)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
-	offset = address - vma->vm_start;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
 	page = vmalloc_to_page((void *)i);
 	if (!page)
-		return NOPAGE_SIGBUS;
+		return VM_FAULT_SIGBUS;
 	get_page(page);
+	vmf->page = page;
-	DRM_DEBUG("0x%lx\n", address);
+	DRM_DEBUG("shm_fault 0x%lx\n", offset);
-	return page;
+	return 0;
 }
 /**
@@ -263,7 +264,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 }
 /**
- * \c nopage method for DMA virtual memory.
+ * \c fault method for DMA virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -271,8 +272,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
  *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
-static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
-						    unsigned long address)
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->head->dev;
@@ -282,24 +282,23 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
 	struct page *page;
 	if (!dma)
-		return NOPAGE_SIGBUS;	/* Error */
+		return VM_FAULT_SIGBUS;	/* Error */
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!dma->pagelist)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
-	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
-	page_nr = offset >> PAGE_SHIFT;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
 	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
 	get_page(page);
+	vmf->page = page;
-	DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
+	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
-	return page;
+	return 0;
 }
 /**
- * \c nopage method for scatter-gather virtual memory.
+ * \c fault method for scatter-gather virtual memory.
  *
 * \param vma virtual memory area.
 * \param address access address.
@@ -307,8 +306,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
  *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
-static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
-						   unsigned long address)
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
@@ -320,77 +318,64 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 	struct page *page;
 	if (!entry)
-		return NOPAGE_SIGBUS;	/* Error */
+		return VM_FAULT_SIGBUS;	/* Error */
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!entry->pagelist)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
-	offset = address - vma->vm_start;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
 	map_offset = map->offset - (unsigned long)dev->sg->virtual;
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
 	get_page(page);
+	vmf->page = page;
-	return page;
+	return 0;
 }
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				  unsigned long address, int *type)
+static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_nopage(vma, address);
+	return drm_do_vm_fault(vma, vmf);
 }
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				      unsigned long address, int *type)
+static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_shm_nopage(vma, address);
+	return drm_do_vm_shm_fault(vma, vmf);
 }
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				      unsigned long address, int *type)
+static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_dma_nopage(vma, address);
+	return drm_do_vm_dma_fault(vma, vmf);
 }
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				     unsigned long address, int *type)
+static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_sg_nopage(vma, address);
+	return drm_do_vm_sg_fault(vma, vmf);
 }
 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
-	.nopage = drm_vm_nopage,
+	.fault = drm_vm_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 /** Shared virtual memory operations */
 static struct vm_operations_struct drm_vm_shm_ops = {
-	.nopage = drm_vm_shm_nopage,
+	.fault = drm_vm_shm_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_shm_close,
 };
 /** DMA virtual memory operations */
 static struct vm_operations_struct drm_vm_dma_ops = {
-	.nopage = drm_vm_dma_nopage,
+	.fault = drm_vm_dma_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 /** Scatter-gather virtual memory operations */
 static struct vm_operations_struct drm_vm_sg_ops = {
-	.nopage = drm_vm_sg_nopage,
+	.fault = drm_vm_sg_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
@@ -604,7 +589,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 	/*
 	 * On some platforms we can't talk to bus dma address from the CPU, so for
 	 * memory of type DRM_AGP, we'll deal with sorting out the real physical
-	 * pages and mappings in nopage()
+	 * pages and mappings in fault()
 	 */
 #if defined(__powerpc__)
 	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
@@ -634,7 +619,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		break;
 	case _DRM_CONSISTENT:
 		/* Consistent memory is really like shared memory. But
-		 * it's allocated in a different way, so avoid nopage */
+		 * it's allocated in a different way, so avoid fault */
 		if (remap_pfn_range(vma, vma->vm_start,
 				    page_to_pfn(virt_to_page(map->handle)),
 				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
...
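
The drm_vm.c hunks above all make the same mechanical change: a ->nopage handler that returned a struct page * (or NOPAGE_SIGBUS) becomes a ->fault handler that computes the offset from vmf->virtual_address, stores the page in vmf->page, and returns 0 or VM_FAULT_SIGBUS. A hedged sketch of that shape outside this file; the example_* names and the page-lookup helper are hypothetical, only the calling convention is taken from the converted handlers.

#include <linux/mm.h>

/* Sketch only: example_* identifiers are hypothetical. */
static struct page *example_lookup_page(struct vm_area_struct *vma,
					unsigned long offset);	/* hypothetical helper */

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* The faulting address replaces the old 'address' parameter. */
	unsigned long offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	struct page *page = example_lookup_page(vma, offset);

	if (!page)
		return VM_FAULT_SIGBUS;	/* what NOPAGE_SIGBUS used to signal */

	get_page(page);		/* the VM core drops this reference later */
	vmf->page = page;	/* hand the page back instead of returning it */
	return 0;		/* VM_FAULT_MINOR accounting is now done by the core */
}

static struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
	/* .open / .close hooks are untouched by this conversion */
};
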
@@ -171,7 +171,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	dev_priv->allow_batchbuffer = 1;
 	/* Program Hardware Status Page */
-	if (!IS_G33(dev)) {
+	if (!I915_NEED_GFX_HWS(dev)) {
 		dev_priv->status_page_dmah =
 		    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
@@ -720,6 +720,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
+	if (!I915_NEED_GFX_HWS(dev))
+		return -EINVAL;
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
...
@@ -160,6 +160,7 @@ static void i915_save_vga(struct drm_device *dev)
 		dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
 	inb(st01);
 	outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
+	inb(st01);
 	/* Graphics controller registers */
 	for (i = 0; i < 9; i++)
@@ -225,6 +226,7 @@ static void i915_restore_vga(struct drm_device *dev)
 		i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
 	inb(st01); /* switch back to index mode */
 	outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
+	inb(st01);
 	/* VGA color palette registers */
 	outb(dev_priv->saveDACMASK, VGA_DACMASK);
@@ -236,7 +238,7 @@
 }
-static int i915_suspend(struct drm_device *dev)
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
@@ -247,6 +249,9 @@ static int i915_suspend(struct drm_device *dev)
 		return -ENODEV;
 	}
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
 	pci_save_state(dev->pdev);
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
@@ -276,6 +281,7 @@ static int i915_suspend(struct drm_device *dev)
 		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
 	}
 	i915_save_palette(dev, PIPE_A);
+	dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
 	/* Pipe & plane B info */
 	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
@@ -303,6 +309,7 @@ static int i915_suspend(struct drm_device *dev)
 		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
 	}
 	i915_save_palette(dev, PIPE_B);
+	dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
 	/* CRT state */
 	dev_priv->saveADPA = I915_READ(ADPA);
@@ -329,12 +336,26 @@ static int i915_suspend(struct drm_device *dev)
 	dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
 	dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+	/* Interrupt state */
+	dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
+	dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
+	dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
 	/* VGA state */
 	dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
 	dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
 	dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
 	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+	/* Clock gating state */
+	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
+	/* Cache mode state */
+	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+	/* Memory Arbitration state */
+	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
 	/* Scratch space */
 	for (i = 0; i < 16; i++) {
 		dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
@@ -345,9 +366,11 @@ static int i915_suspend(struct drm_device *dev)
 	i915_save_vga(dev);
+	if (state.event == PM_EVENT_SUSPEND) {
 	/* Shut down the device */
 	pci_disable_device(dev->pdev);
 	pci_set_power_state(dev->pdev, PCI_D3hot);
+	}
 	return 0;
 }
@@ -400,8 +423,6 @@ static int i915_resume(struct drm_device *dev)
 		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
 	}
-	if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) &&
-	    (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS))
 	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
 	i915_restore_palette(dev, PIPE_A);
@@ -444,10 +465,9 @@ static int i915_resume(struct drm_device *dev)
 		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
 	}
-	if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) &&
-	    (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS))
 	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
-	i915_restore_palette(dev, PIPE_A);
+	i915_restore_palette(dev, PIPE_B);
 	/* Enable the plane */
 	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
 	I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
@@ -485,6 +505,15 @@ static int i915_resume(struct drm_device *dev)
 	I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
 	udelay(150);
+	/* Clock gating state */
+	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+	/* Cache mode state */
+	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+	/* Memory arbitration state */
+	I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
 	for (i = 0; i < 16; i++) {
 		I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
 		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
...
@@ -134,6 +134,7 @@ typedef struct drm_i915_private {
 	u32 saveVBLANK_A;
 	u32 saveVSYNC_A;
 	u32 saveBCLRPAT_A;
+	u32 savePIPEASTAT;
 	u32 saveDSPASTRIDE;
 	u32 saveDSPASIZE;
 	u32 saveDSPAPOS;
@@ -154,6 +155,7 @@ typedef struct drm_i915_private {
 	u32 saveVBLANK_B;
 	u32 saveVSYNC_B;
 	u32 saveBCLRPAT_B;
+	u32 savePIPEBSTAT;
 	u32 saveDSPBSTRIDE;
 	u32 saveDSPBSIZE;
 	u32 saveDSPBPOS;
@@ -182,6 +184,12 @@ typedef struct drm_i915_private {
 	u32 saveFBC_LL_BASE;
 	u32 saveFBC_CONTROL;
 	u32 saveFBC_CONTROL2;
+	u32 saveIER;
+	u32 saveIIR;
+	u32 saveIMR;
+	u32 saveCACHE_MODE_0;
+	u32 saveDSPCLK_GATE_D;
+	u32 saveMI_ARB_STATE;
 	u32 saveSWF0[16];
 	u32 saveSWF1[16];
 	u32 saveSWF2[3];
@@ -450,6 +458,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 */
 #define DMA_FADD_S 0x20d4
+/* Memory Interface Arbitration State
+ */
+#define MI_ARB_STATE 0x20e4
 /* Cache mode 0 reg.
  * - Manipulating render cache behaviour is central
  * to the concept of zone rendering, tuning this reg can help avoid
@@ -460,6 +472,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
  * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
  */
 #define Cache_Mode_0 0x2120
+#define CACHE_MODE_0 0x2120
 #define CM0_MASK_SHIFT 16
 #define CM0_IZ_OPT_DISABLE (1<<6)
 #define CM0_ZR_OPT_DISABLE (1<<5)
@@ -655,6 +668,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 /** P1 value is 2 greater than this field */
 # define VGA0_PD_P1_MASK (0x1f << 0)
+#define DSPCLK_GATE_D 0x6200
 /* I830 CRTC registers */
 #define HTOTAL_A 0x60000
 #define HBLANK_A 0x60004
@@ -1101,6 +1116,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
 			IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
+#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev))
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 #endif
@@ -825,11 +825,19 @@ static u32 RADEON_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 	return ret;
 }
+static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
+	return RADEON_READ(RS690_MC_DATA);
+}
 u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
 {
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
 	else
@@ -840,6 +848,8 @@ static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
 {
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
 	else
@@ -850,6 +860,8 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
 {
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
 	else
@@ -1362,6 +1374,70 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
 	}
 }
+/* Enable or disable RS690 GART on the chip */
+static void radeon_set_rs690gart(drm_radeon_private_t *dev_priv, int on)
+{
+	u32 temp;
+	if (on) {
+		DRM_DEBUG("programming rs690 gart %08X %08lX %08X\n",
+			  dev_priv->gart_vm_start,
+			  (long)dev_priv->gart_info.bus_addr,
+			  dev_priv->gart_size);
+		temp = RS690_READ_MCIND(dev_priv, RS690_MC_MISC_CNTL);
+		RS690_WRITE_MCIND(RS690_MC_MISC_CNTL, 0x5000);
+		RS690_WRITE_MCIND(RS690_MC_AGP_SIZE,
+				  RS690_MC_GART_EN | RS690_MC_AGP_SIZE_32MB);
+		temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_FEATURE_ID);
+		RS690_WRITE_MCIND(RS690_MC_GART_FEATURE_ID, 0x42040800);
+		RS690_WRITE_MCIND(RS690_MC_GART_BASE,
+				  dev_priv->gart_info.bus_addr);
+		temp = RS690_READ_MCIND(dev_priv, RS690_MC_AGP_MODE_CONTROL);
+		RS690_WRITE_MCIND(RS690_MC_AGP_MODE_CONTROL, 0x01400000);
+		RS690_WRITE_MCIND(RS690_MC_AGP_BASE,
+				  (unsigned int)dev_priv->gart_vm_start);
+		dev_priv->gart_size = 32*1024*1024;
+		temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
+			 0xffff0000) | (dev_priv->gart_vm_start >> 16));
+		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, temp);
+		temp = RS690_READ_MCIND(dev_priv, RS690_MC_AGP_SIZE);
+		RS690_WRITE_MCIND(RS690_MC_AGP_SIZE,
+				  RS690_MC_GART_EN | RS690_MC_AGP_SIZE_32MB);
+		do {
+			temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_CACHE_CNTL);
+			if ((temp & RS690_MC_GART_CLEAR_STATUS) ==
+			    RS690_MC_GART_CLEAR_DONE)
+				break;
+			DRM_UDELAY(1);
+		} while (1);
+		RS690_WRITE_MCIND(RS690_MC_GART_CACHE_CNTL,
+				  RS690_MC_GART_CC_CLEAR);
+		do {
+			temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_CACHE_CNTL);
+			if ((temp & RS690_MC_GART_CLEAR_STATUS) ==
+			    RS690_MC_GART_CLEAR_DONE)
+				break;
+			DRM_UDELAY(1);
+		} while (1);
+		RS690_WRITE_MCIND(RS690_MC_GART_CACHE_CNTL,
+				  RS690_MC_GART_CC_NO_CHANGE);
+	} else {
+		RS690_WRITE_MCIND(RS690_MC_AGP_SIZE, RS690_MC_GART_DIS);
+	}
+}
 static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
 {
 	u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
@@ -1396,6 +1472,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
 {
 	u32 tmp;
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+		radeon_set_rs690gart(dev_priv, on);
+		return;
+	}
 	if (dev_priv->flags & RADEON_IS_IGPGART) {
 		radeon_set_igpgart(dev_priv, on);
 		return;
...
@@ -123,6 +123,7 @@ enum radeon_family {
 	CHIP_R420,
 	CHIP_RV410,
 	CHIP_RS400,
+	CHIP_RS690,
 	CHIP_RV515,
 	CHIP_R520,
 	CHIP_RV530,
@@ -467,6 +468,36 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #define RADEON_IGPGART_ENABLE 0x38
 #define RADEON_IGPGART_UNK_39 0x39
+#define RS690_MC_INDEX 0x78
+#	define RS690_MC_INDEX_MASK 0x1ff
+#	define RS690_MC_INDEX_WR_EN (1 << 9)
+#	define RS690_MC_INDEX_WR_ACK 0x7f
+#define RS690_MC_DATA 0x7c
+#define RS690_MC_MISC_CNTL 0x18
+#define RS690_MC_GART_FEATURE_ID 0x2b
+#define RS690_MC_GART_BASE 0x2c
+#define RS690_MC_GART_CACHE_CNTL 0x2e
+#	define RS690_MC_GART_CC_NO_CHANGE 0x0
+#	define RS690_MC_GART_CC_CLEAR 0x1
+#	define RS690_MC_GART_CLEAR_STATUS (1 << 1)
+#	define RS690_MC_GART_CLEAR_DONE (0 << 1)
+#	define RS690_MC_GART_CLEAR_PENDING (1 << 1)
+#define RS690_MC_AGP_SIZE 0x38
+#	define RS690_MC_GART_DIS 0x0
+#	define RS690_MC_GART_EN 0x1
+#	define RS690_MC_AGP_SIZE_32MB (0 << 1)
+#	define RS690_MC_AGP_SIZE_64MB (1 << 1)
+#	define RS690_MC_AGP_SIZE_128MB (2 << 1)
+#	define RS690_MC_AGP_SIZE_256MB (3 << 1)
+#	define RS690_MC_AGP_SIZE_512MB (4 << 1)
+#	define RS690_MC_AGP_SIZE_1GB (5 << 1)
+#	define RS690_MC_AGP_SIZE_2GB (6 << 1)
+#define RS690_MC_AGP_MODE_CONTROL 0x39
+#define RS690_MC_FB_LOCATION 0x100
+#define RS690_MC_AGP_LOCATION 0x101
+#define RS690_MC_AGP_BASE 0x102
 #define R520_MC_IND_INDEX 0x70
 #define R520_MC_IND_WR_EN (1<<24)
 #define R520_MC_IND_DATA 0x74
@@ -1076,6 +1107,13 @@ do { \
 	RADEON_WRITE(R520_MC_IND_INDEX, 0); \
 } while (0)
+#define RS690_WRITE_MCIND( addr, val ) \
+do { \
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
+	RADEON_WRITE(RS690_MC_DATA, val); \
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
+} while (0)
 #define CP_PACKET0( reg, n ) \
 	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
 #define CP_PACKET0_TABLE( reg, n ) \
...