Commit 5f757f91 authored by Linus Torvalds

Merge branch 'drm-patches' of master.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-patches' of master.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: Add 965GM pci id update
  drm: just use io_remap_pfn_range on all archs..
  drm: fix DRM_CONSISTENT mapping
  drm: fix up mmap locking in preparation for ttm changes
  drm: fix driver deadlock with AIGLX and reclaim_buffers_locked
  drm: fix warning in drm_fops.c
  drm: allow for more generic drm ioctls
  drm: fix alpha domain handling
  via: fix CX700 pci id
  drm: make drm_io_prot static.
  drm: remove via_mm.h
  drm: add missing NULL assignment
  drm/radeon: Fix u32 overflows when determining AGP base address in card space.
  drm: port over use_vmalloc code from git hashtab
  drm: fix crash with fops lock and fixup sarea/page size locking
  drm: bring bufs code from git tree.
  drm: move protection stuff into separate function
  drm: Use ARRAY_SIZE macro when appropriate
  drm: update README.drm (bugzilla #7933)
  drm: remove unused exports
parents 9fa0853a ce7dd063
drivers/char/drm/README.drm

 ************************************************************
 * For the very latest on DRI development, please see:      *
-* http://dri.sourceforge.net/                              *
+* http://dri.freedesktop.org/                              *
 ************************************************************
 The Direct Rendering Manager (drm) is a device-independent kernel-level
@@ -26,21 +26,19 @@ ways:
 Documentation on the DRI is available from:
-    http://precisioninsight.com/piinsights.html
-    http://sourceforge.net/project/showfiles.php?group_id=387
-    http://dri.sourceforge.net/doc/
+    http://dri.freedesktop.org/wiki/Documentation
 For specific information about kernel-level support, see:
     The Direct Rendering Manager, Kernel Support for the Direct Rendering
     Infrastructure
-    http://precisioninsight.com/dr/drm.html
+    http://dri.sourceforge.net/doc/drm_low_level.html
     Hardware Locking for the Direct Rendering Infrastructure
-    http://precisioninsight.com/dr/locking.html
+    http://dri.sourceforge.net/doc/hardware_locking_low_level.html
     A Security Analysis of the Direct Rendering Infrastructure
-    http://precisioninsight.com/dr/security.html
+    http://dri.sourceforge.net/doc/security_low_level.html
 ************************************************************
 * For the very latest on DRI development, please see:      *
 * http://dri.sourceforge.net/                              *
 ************************************************************
drivers/char/drm/drm.h

@@ -654,11 +654,13 @@ typedef struct drm_set_version {
 /**
  * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x79.
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
  *
  * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
  * drmCommandReadWrite().
  */
 #define DRM_COMMAND_BASE                0x40
+#define DRM_COMMAND_END                 0xA0
 #endif
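For orientation, driver-private ioctl numbers are formed by offsetting into this range. A minimal sketch, assuming a hypothetical FOO driver (the flush command and its number are made up; DRM_IO() and the DRM_COMMAND_BASE offset are the existing DRM conventions):

    /* Hypothetical driver-private ioctl: driver-local number 0x02 is
     * offset by DRM_COMMAND_BASE, giving nr = 0x42 -- inside the
     * device-specific window [0x40, 0xA0). */
    #define DRM_FOO_FLUSH        0x02
    #define DRM_IOCTL_FOO_FLUSH  DRM_IO(DRM_COMMAND_BASE + DRM_FOO_FLUSH)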
drivers/char/drm/drmP.h

@@ -414,6 +414,10 @@ typedef struct drm_lock_data {
 	struct file *filp;		/**< File descr of lock holder (0=kernel) */
 	wait_queue_head_t lock_queue;	/**< Queue of blocked processes */
 	unsigned long lock_time;	/**< Time of last lock in jiffies */
+	spinlock_t spinlock;
+	uint32_t kernel_waiters;
+	uint32_t user_waiters;
+	int idle_has_lock;
 } drm_lock_data_t;
 /**
@@ -590,6 +594,8 @@ struct drm_driver {
 	void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
 	void (*reclaim_buffers_locked) (struct drm_device *dev,
 					struct file *filp);
+	void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
+					    struct file * filp);
 	unsigned long (*get_map_ofs) (drm_map_t * map);
 	unsigned long (*get_reg_ofs) (struct drm_device * dev);
 	void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
@@ -764,7 +770,7 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
 }
 #ifdef __alpha__
-#define drm_get_pci_domain(dev) dev->hose->bus->number
+#define drm_get_pci_domain(dev) dev->hose->index
 #else
 #define drm_get_pci_domain(dev) 0
 #endif
@@ -915,9 +921,18 @@ extern int drm_lock(struct inode *inode, struct file *filp,
 		    unsigned int cmd, unsigned long arg);
 extern int drm_unlock(struct inode *inode, struct file *filp,
 		      unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
-extern int drm_lock_free(drm_device_t * dev,
-			 __volatile__ unsigned int *lock, unsigned int context);
+extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
+extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
+extern void drm_idlelock_take(drm_lock_data_t *lock_data);
+extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
 /* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
drivers/char/drm/drm_bufs.c

@@ -57,7 +57,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
 	list_for_each(list, &dev->maplist->head) {
 		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
 		if (entry->map && map->type == entry->map->type &&
-		    entry->map->offset == map->offset) {
+		    ((entry->map->offset == map->offset) ||
+		     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
 			return entry;
 		}
 	}
@@ -180,8 +181,20 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 		if (map->type == _DRM_REGISTERS)
 			map->handle = ioremap(map->offset, map->size);
 		break;
 	case _DRM_SHM:
+		list = drm_find_matching_map(dev, map);
+		if (list != NULL) {
+			if (list->map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size, list->map->size);
+				list->map->size = map->size;
+			}
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			*maplist = list;
+			return 0;
+		}
 		map->handle = vmalloc_user(map->size);
 		DRM_DEBUG("%lu %d %p\n",
 			  map->size, drm_order(map->size), map->handle);
@@ -200,15 +213,45 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
 		}
 		break;
-	case _DRM_AGP:
-		if (drm_core_has_AGP(dev)) {
+	case _DRM_AGP: {
+		drm_agp_mem_t *entry;
+		int valid = 0;
+
+		if (!drm_core_has_AGP(dev)) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -EINVAL;
+		}
 #ifdef __alpha__
 		map->offset += dev->hose->mem_space->start;
 #endif
+		/* Note: dev->agp->base may actually be 0 when the DRM
+		 * is not in control of AGP space. But if user space is
+		 * it should already have added the AGP base itself.
+		 */
 		map->offset += dev->agp->base;
 		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
-		}
+
+		/* This assumes the DRM is in total control of AGP space.
+		 * It's not always the case as AGP can be in the control
+		 * of user space (i.e. i810 driver). So this loop will get
+		 * skipped and we double check that dev->agp->memory is
+		 * actually set as well as being invalid before EPERM'ing
+		 */
+		for (entry = dev->agp->memory; entry; entry = entry->next) {
+			if ((map->offset >= entry->bound) &&
+			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+				valid = 1;
+				break;
+			}
+		}
+		if (dev->agp->memory && !valid) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -EPERM;
+		}
+		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
 		break;
+	}
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
@@ -267,7 +310,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 	*maplist = list;
 	return 0;
 }
 int drm_addmap(drm_device_t * dev, unsigned int offset,
 	       unsigned int size, drm_map_type_t type,
@@ -519,6 +562,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 {
 	drm_device_dma_t *dma = dev->dma;
 	drm_buf_entry_t *entry;
+	drm_agp_mem_t *agp_entry;
 	drm_buf_t *buf;
 	unsigned long offset;
 	unsigned long agp_offset;
@@ -529,7 +573,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	int page_order;
 	int total;
 	int byte_count;
-	int i;
+	int i, valid;
 	drm_buf_t **temp_buflist;
 	if (!dma)
@@ -560,6 +604,19 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	if (dev->queue_count)
 		return -EBUSY;	/* Not while in use */
+	/* Make sure buffers are located in AGP memory that we own */
+	valid = 0;
+	for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
+		if ((agp_offset >= agp_entry->bound) &&
+		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+			valid = 1;
+			break;
+		}
+	}
+	if (dev->agp->memory && !valid) {
+		DRM_DEBUG("zone invalid\n");
+		return -EINVAL;
+	}
 	spin_lock(&dev->count_lock);
 	if (dev->buf_use) {
 		spin_unlock(&dev->count_lock);
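Both ownership checks added above are plain interval-containment tests against the AGP blocks the DRM has bound. A standalone sketch of the arithmetic, with made-up block and zone numbers rather than real drm_agp_mem_t entries:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul    /* assumed 4 KB pages */

    struct agp_block { unsigned long bound, pages; };

    /* A zone [offset, offset + size) is valid only if some bound block
     * [bound, bound + pages * PAGE_SIZE) fully contains it. */
    static int zone_valid(const struct agp_block *blocks, int n,
                          unsigned long offset, unsigned long size)
    {
        int i;
        for (i = 0; i < n; i++)
            if (offset >= blocks[i].bound &&
                offset + size <= blocks[i].bound + blocks[i].pages * PAGE_SIZE)
                return 1;
        return 0;
    }

    int main(void)
    {
        struct agp_block blocks[] = { { 0x10000000ul, 256 } };      /* one 1 MB block */
        printf("%d\n", zone_valid(blocks, 1, 0x10000000ul, 4096));  /* 1: contained */
        printf("%d\n", zone_valid(blocks, 1, 0x100ff000ul, 8192));  /* 0: spills past the end */
        return 0;
    }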
drivers/char/drm/drm_drv.c

@@ -496,11 +496,14 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 		  (long)old_encode_dev(priv->head->device),
 		  priv->authenticated);
-	if (nr < DRIVER_IOCTL_COUNT)
-		ioctl = &drm_ioctls[nr];
-	else if ((nr >= DRM_COMMAND_BASE)
+	if ((nr >= DRIVER_IOCTL_COUNT) &&
+	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+		goto err_i1;
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
 	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
 		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
+		ioctl = &drm_ioctls[nr];
 	else
 		goto err_i1;
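Read as ranges, the reworked dispatch rejects any nr outside both the core table and the device-specific window, serves [DRM_COMMAND_BASE, DRM_COMMAND_END) from the driver's table (bounded by num_ioctls), and serves the rest from the core drm_ioctls[] table. A standalone sketch of that routing; the DRIVER_IOCTL_COUNT value here is an assumption for illustration, not the real table size:

    #include <stdio.h>

    #define DRIVER_IOCTL_COUNT 0x3e    /* assumed size of the core table */
    #define DRM_COMMAND_BASE   0x40
    #define DRM_COMMAND_END    0xA0

    static const char *route(unsigned int nr, unsigned int num_ioctls)
    {
        /* Outside the core table and outside the device window: reject. */
        if (nr >= DRIVER_IOCTL_COUNT &&
            (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END))
            return "err_i1";
        if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END)
            return nr < DRM_COMMAND_BASE + num_ioctls
                    ? "driver ioctls[]" : "err_i1";
        return "core drm_ioctls[]";
    }

    int main(void)
    {
        printf("0x10 -> %s\n", route(0x10, 8));  /* core table */
        printf("0x42 -> %s\n", route(0x42, 8));  /* driver table */
        printf("0x90 -> %s\n", route(0x90, 8));  /* past the driver's 8 entries */
        return 0;
    }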
drivers/char/drm/drm_fops.c

@@ -46,6 +46,7 @@ static int drm_setup(drm_device_t * dev)
 	drm_local_map_t *map;
 	int i;
 	int ret;
+	u32 sareapage;
 	if (dev->driver->firstopen) {
 		ret = dev->driver->firstopen(dev);
@@ -56,7 +57,8 @@
 	dev->magicfree.next = NULL;
 	/* prebuild the SAREA */
-	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+	sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
+	i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
 	if (i != 0)
 		return i;
@@ -84,7 +86,7 @@
 	INIT_LIST_HEAD(&dev->ctxlist->head);
 	dev->vmalist = NULL;
-	dev->sigdata.lock = dev->lock.hw_lock = NULL;
+	dev->sigdata.lock = NULL;
 	init_waitqueue_head(&dev->lock.lock_queue);
 	dev->queue_count = 0;
 	dev->queue_reserved = 0;
@@ -354,58 +356,56 @@ int drm_release(struct inode *inode, struct file *filp)
 		  current->pid, (long)old_encode_dev(priv->head->device),
 		  dev->open_count);
-	if (priv->lock_count && dev->lock.hw_lock &&
-	    _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
-	    dev->lock.filp == filp) {
-		DRM_DEBUG("File %p released, freeing lock for context %d\n",
-			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-		if (dev->driver->reclaim_buffers_locked)
-			dev->driver->reclaim_buffers_locked(dev, filp);
-		drm_lock_free(dev, &dev->lock.hw_lock->lock,
-			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-		/* FIXME: may require heavy-handed reset of
-		   hardware at this point, possibly
-		   processed via a callback to the X
-		   server. */
-	} else if (dev->driver->reclaim_buffers_locked && priv->lock_count
-		   && dev->lock.hw_lock) {
-		/* The lock is required to reclaim buffers */
-		DECLARE_WAITQUEUE(entry, current);
-		add_wait_queue(&dev->lock.lock_queue, &entry);
-		for (;;) {
-			__set_current_state(TASK_INTERRUPTIBLE);
-			if (!dev->lock.hw_lock) {
-				/* Device has been unregistered */
-				retcode = -EINTR;
-				break;
-			}
-			if (drm_lock_take(&dev->lock.hw_lock->lock,
-					  DRM_KERNEL_CONTEXT)) {
-				dev->lock.filp = filp;
-				dev->lock.lock_time = jiffies;
-				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-				break;	/* Got lock */
-			}
-			/* Contention */
-			schedule();
-			if (signal_pending(current)) {
-				retcode = -ERESTARTSYS;
-				break;
-			}
-		}
-		__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&dev->lock.lock_queue, &entry);
-		if (!retcode) {
-			dev->driver->reclaim_buffers_locked(dev, filp);
-			drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				      DRM_KERNEL_CONTEXT);
-		}
-	}
+	if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
+		if (drm_i_have_hw_lock(filp)) {
+			dev->driver->reclaim_buffers_locked(dev, filp);
+		} else {
+			unsigned long _end = jiffies + 3 * DRM_HZ;
+			int locked = 0;
+			drm_idlelock_take(&dev->lock);
+			/*
+			 * Wait for a while.
+			 */
+			do {
+				spin_lock(&dev->lock.spinlock);
+				locked = dev->lock.idle_has_lock;
+				spin_unlock(&dev->lock.spinlock);
+				if (locked)
+					break;
+				schedule();
+			} while (!time_after_eq(jiffies, _end));
+			if (!locked) {
+				DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+					  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+					  "\tI will go on reclaiming the buffers anyway.\n");
+			}
+			dev->driver->reclaim_buffers_locked(dev, filp);
+			drm_idlelock_release(&dev->lock);
+		}
+	}
+	if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
+		drm_idlelock_take(&dev->lock);
+		dev->driver->reclaim_buffers_idlelocked(dev, filp);
+		drm_idlelock_release(&dev->lock);
+	}
+	if (drm_i_have_hw_lock(filp)) {
+		DRM_DEBUG("File %p released, freeing lock for context %d\n",
+			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+		drm_lock_free(&dev->lock,
+			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+	}
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
 	    !dev->driver->reclaim_buffers_locked) {
 		dev->driver->reclaim_buffers(dev, filp);
drivers/char/drm/drm_hashtab.c

@@ -43,7 +43,16 @@ int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
 	ht->size = 1 << order;
 	ht->order = order;
 	ht->fill = 0;
+	ht->table = NULL;
+	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+	if (!ht->use_vmalloc) {
+		ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+				       DRM_MEM_HASHTAB);
+	}
+	if (!ht->table) {
+		ht->use_vmalloc = 1;
 	ht->table = vmalloc(ht->size*sizeof(*ht->table));
+	}
 	if (!ht->table) {
 		DRM_ERROR("Out of memory for hash table\n");
 		return -ENOMEM;
@@ -183,7 +192,11 @@ int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
 void drm_ht_remove(drm_open_hash_t *ht)
 {
 	if (ht->table) {
+		if (ht->use_vmalloc)
 		vfree(ht->table);
+		else
+			drm_free(ht->table, ht->size * sizeof(*ht->table),
+				 DRM_MEM_HASHTAB);
 		ht->table = NULL;
 	}
 }
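The new policy just asks whether the table fits in one page: small tables come from drm_calloc() (kmalloc-backed), and anything larger, or a failed small allocation, falls back to vmalloc(). A standalone sketch of the size math, assuming a 4 KB page and an 8-byte struct hlist_head (both illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096ul

    int main(void)
    {
        unsigned long hlist_head_size = 8;    /* assumed pointer-sized bucket */
        unsigned int order;

        for (order = 7; order <= 10; order++) {
            unsigned long buckets = 1ul << order;
            unsigned long bytes = buckets * hlist_head_size;
            printf("order %2u: %5lu bytes -> %s\n", order, bytes,
                   bytes > PAGE_SIZE ? "vmalloc" : "drm_calloc");
        }
        return 0;    /* order 9 (4096 bytes) still fits; order 10 does not */
    }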
drivers/char/drm/drm_hashtab.h

@@ -47,6 +47,7 @@ typedef struct drm_open_hash{
 	unsigned int order;
 	unsigned int fill;
 	struct hlist_head *table;
+	int use_vmalloc;
 } drm_open_hash_t;
drivers/char/drm/drm_irq.c

@@ -424,7 +424,7 @@ static void drm_locked_tasklet_func(unsigned long data)
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
 	if (!dev->locked_tasklet_func ||
-	    !drm_lock_take(&dev->lock.hw_lock->lock,
+	    !drm_lock_take(&dev->lock,
 			   DRM_KERNEL_CONTEXT)) {
 		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 		return;
@@ -435,7 +435,7 @@ static void drm_locked_tasklet_func(unsigned long data)
 	dev->locked_tasklet_func(dev);
-	drm_lock_free(dev, &dev->lock.hw_lock->lock,
+	drm_lock_free(&dev->lock,
 		      DRM_KERNEL_CONTEXT);
 	dev->locked_tasklet_func = NULL;
drivers/char/drm/drm_lock.c

@@ -35,9 +35,6 @@
 #include "drmP.h"
-static int drm_lock_transfer(drm_device_t * dev,
-			     __volatile__ unsigned int *lock,
-			     unsigned int context);
 static int drm_notifier(void *priv);
 /**
@@ -80,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
 		return -EINVAL;
 	add_wait_queue(&dev->lock.lock_queue, &entry);
+	spin_lock(&dev->lock.spinlock);
+	dev->lock.user_waiters++;
+	spin_unlock(&dev->lock.spinlock);
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		if (!dev->lock.hw_lock) {
@@ -87,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
 			ret = -EINTR;
 			break;
 		}
-		if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
+		if (drm_lock_take(&dev->lock, lock.context)) {
 			dev->lock.filp = filp;
 			dev->lock.lock_time = jiffies;
 			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -101,12 +101,14 @@ int drm_lock(struct inode *inode, struct file *filp,
 			break;
 		}
 	}
+	spin_lock(&dev->lock.spinlock);
+	dev->lock.user_waiters--;
+	spin_unlock(&dev->lock.spinlock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->lock.lock_queue, &entry);
-	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-	if (ret)
-		return ret;
+	DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+	if (ret) return ret;
 	sigemptyset(&dev->sigmask);
 	sigaddset(&dev->sigmask, SIGSTOP);
@@ -127,14 +129,12 @@ int drm_lock(struct inode *inode, struct file *filp,
 		}
 	}
-	/* dev->driver->kernel_context_switch isn't used by any of the x86
-	 * drivers but is used by the Sparc driver.
-	 */
 	if (dev->driver->kernel_context_switch &&
 	    dev->last_context != lock.context) {
 		dev->driver->kernel_context_switch(dev, dev->last_context,
 						   lock.context);
 	}
 	return 0;
 }
@@ -184,12 +184,8 @@ int drm_unlock(struct inode *inode, struct file *filp,
 	if (dev->driver->kernel_context_switch_unlock)
 		dev->driver->kernel_context_switch_unlock(dev);
 	else {
-		drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT);
-		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("\n");
+		if (drm_lock_free(&dev->lock, lock.context)) {
+			/* FIXME: Should really bail out here. */
 		}
 	}
@@ -206,18 +202,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(drm_lock_data_t *lock_data,
+		  unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+	spin_lock(&lock_data->spinlock);
 	do {
 		old = *lock;
 		if (old & _DRM_LOCK_HELD)
 			new = old | _DRM_LOCK_CONT;
-		else
-			new = context | _DRM_LOCK_HELD;
+		else {
+			new = context | _DRM_LOCK_HELD |
+				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+				 _DRM_LOCK_CONT : 0);
+		}
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
+	spin_unlock(&lock_data->spinlock);
 	if (_DRM_LOCKING_CONTEXT(old) == context) {
 		if (old & _DRM_LOCK_HELD) {
 			if (context != DRM_KERNEL_CONTEXT) {
@@ -227,7 +231,8 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
 			return 0;
 		}
 	}
-	if (new == (context | _DRM_LOCK_HELD)) {
+
+	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
 		/* Have lock */
 		return 1;
 	}
@@ -246,13 +251,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
  * Resets the lock file pointer.
  * Marks the lock as held by the given context, via the \p cmpxchg instruction.
  */
-static int drm_lock_transfer(drm_device_t * dev,
-			     __volatile__ unsigned int *lock,
+static int drm_lock_transfer(drm_lock_data_t *lock_data,
 			     unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
-	dev->lock.filp = NULL;
+	lock_data->filp = NULL;
 	do {
 		old = *lock;
 		new = context | _DRM_LOCK_HELD;
@@ -272,23 +277,32 @@ static int drm_lock_transfer(drm_device_t * dev,
  * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
  * waiting on the lock queue.
  */
-int drm_lock_free(drm_device_t * dev,
-		  __volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+	spin_lock(&lock_data->spinlock);
+	if (lock_data->kernel_waiters != 0) {
+		drm_lock_transfer(lock_data, 0);
+		lock_data->idle_has_lock = 1;
+		spin_unlock(&lock_data->spinlock);
+		return 1;
+	}
+	spin_unlock(&lock_data->spinlock);
-	dev->lock.filp = NULL;
 	do {
 		old = *lock;
-		new = 0;
+		new = _DRM_LOCKING_CONTEXT(old);
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
 	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
 		DRM_ERROR("%d freed heavyweight lock held by %d\n",
 			  context, _DRM_LOCKING_CONTEXT(old));
 		return 1;
 	}
-	wake_up_interruptible(&dev->lock.lock_queue);
+	wake_up_interruptible(&lock_data->lock_queue);
 	return 0;
 }
@@ -322,3 +336,67 @@ static int drm_notifier(void *priv)
 	} while (prev != old);
 	return 0;
 }
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+void drm_idlelock_take(drm_lock_data_t *lock_data)
+{
+	int ret = 0;
+
+	spin_lock(&lock_data->spinlock);
+	lock_data->kernel_waiters++;
+	if (!lock_data->idle_has_lock) {
+		spin_unlock(&lock_data->spinlock);
+		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+		spin_lock(&lock_data->spinlock);
+
+		if (ret == 1)
+			lock_data->idle_has_lock = 1;
+	}
+	spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(drm_lock_data_t *lock_data)
+{
+	unsigned int old, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock(&lock_data->spinlock);
+	if (--lock_data->kernel_waiters == 0) {
+		if (lock_data->idle_has_lock) {
+			do {
+				old = *lock;
+				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+			} while (prev != old);
+			wake_up_interruptible(&lock_data->lock_queue);
+			lock_data->idle_has_lock = 0;
+		}
+	}
+	spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+	DRM_DEVICE;
+
+	return (priv->lock_count && dev->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+		dev->lock.filp == filp);
+}
+EXPORT_SYMBOL(drm_i_have_hw_lock);
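Taken together, the pattern drm_release() now uses can be read as one small helper. A hedged sketch, not code from the patch itself (the wrapper name is made up; the two idlelock calls and the hook are the API added above):

    /* Sketch: bracket buffer reclamation with the idlelock. Unlike the
     * old path, this never blocks waiting for the heavyweight lock; it
     * either takes it immediately or inherits it on the next unlock. */
    static void reclaim_with_idlelock(drm_device_t *dev, struct file *filp)
    {
        drm_idlelock_take(&dev->lock);
        dev->driver->reclaim_buffers_idlelocked(dev, filp);
        drm_idlelock_release(&dev->lock);    /* drop lock, wake waiters */
    }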
drivers/char/drm/drm_mm.c

@@ -274,7 +274,6 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
 	return drm_mm_create_tail_node(mm, start, size);
 }
-EXPORT_SYMBOL(drm_mm_init);
 void drm_mm_takedown(drm_mm_t * mm)
 {
@@ -295,4 +294,3 @@ void drm_mm_takedown(drm_mm_t * mm)
 	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
 }
-EXPORT_SYMBOL(drm_mm_takedown);
drivers/char/drm/drm_pciids.h

@@ -230,10 +230,10 @@
 	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
+	{0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
 	{0, 0, 0}
 #define i810_PCI_IDS \
@@ -296,5 +296,6 @@
 	{0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
drivers/char/drm/drm_proc.c

@@ -72,7 +72,7 @@ static struct drm_proc_list {
 #endif
 };
-#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
 /**
  * Initialize the DRI proc filesystem for a device.
drivers/char/drm/drm_stub.c

@@ -62,6 +62,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
 	spin_lock_init(&dev->tasklet_lock);
+	spin_lock_init(&dev->lock.spinlock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
drivers/char/drm/drm_vm.c

@@ -41,6 +41,30 @@
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+		pgprot_val(tmp) |= _PAGE_PCD;
+		pgprot_val(tmp) &= ~_PAGE_PWT;
+	}
+#elif defined(__powerpc__)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
+	if (map_type == _DRM_REGISTERS)
+		pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+			    vma->vm_start))
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
 /**
  * \c nopage method for AGP virtual memory.
  *
@@ -151,8 +175,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
 	offset = address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
-	page = (map->type == _DRM_CONSISTENT) ?
-	    virt_to_page((void *)i) : vmalloc_to_page((void *)i);
+	page = vmalloc_to_page((void *)i);
 	if (!page)
 		return NOPAGE_SIGBUS;
 	get_page(page);
@@ -389,7 +412,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
-static void drm_vm_open(struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct vm_area_struct *vma)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
@@ -401,15 +424,23 @@ static void drm_vm_open(struct vm_area_struct *vma)
 	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 	if (vma_entry) {
-		mutex_lock(&dev->struct_mutex);
 		vma_entry->vma = vma;
 		vma_entry->next = dev->vmalist;
 		vma_entry->pid = current->pid;
 		dev->vmalist = vma_entry;
-		mutex_unlock(&dev->struct_mutex);
 	}
 }
+
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+	drm_file_t *priv = vma->vm_file->private_data;
+	drm_device_t *dev = priv->head->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_open_locked(vma);
+	mutex_unlock(&dev->struct_mutex);
+}
 /**
  * \c close method for all virtual memory types.
  *
@@ -460,7 +491,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	drm_device_dma_t *dma;
 	unsigned long length = vma->vm_end - vma->vm_start;
-	lock_kernel();
 	dev = priv->head->dev;
 	dma = dev->dma;
 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -468,10 +498,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	/* Length must match exact page count */
 	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
-		unlock_kernel();
 		return -EINVAL;
 	}
-	unlock_kernel();
 	if (!capable(CAP_SYS_ADMIN) &&
 	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -494,7 +522,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
-	drm_vm_open(vma);
+	drm_vm_open_locked(vma);
 	return 0;
 }
@@ -529,7 +557,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  * according to the mapping type and remaps the pages. Finally sets the file
  * pointer and calls vm_open().
  */
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev = priv->head->dev;
@@ -565,7 +593,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EPERM;
 	/* Check for valid size. */
-	if (map->size != vma->vm_end - vma->vm_start)
+	if (map->size < vma->vm_end - vma->vm_start)
 		return -EINVAL;
 	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
@@ -600,37 +628,16 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		/* fall through to _DRM_FRAME_BUFFER... */
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
-#if defined(__i386__) || defined(__x86_64__)
-		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
-			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
-		}
-#elif defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-		if (map->type == _DRM_REGISTERS)
-			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
-#endif
-		vma->vm_flags |= VM_IO;	/* not in core dump */
-#if defined(__ia64__)
-		if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
-			vma->vm_page_prot =
-			    pgprot_writecombine(vma->vm_page_prot);
-		else
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
 		offset = dev->driver->get_reg_ofs(dev);
+		vma->vm_flags |= VM_IO;	/* not in core dump */
+		vma->vm_page_prot = drm_io_prot(map->type, vma);
 #ifdef __sparc__
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
-#else
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (map->offset + offset) >> PAGE_SHIFT,
-				       vma->vm_end - vma->vm_start,
-				       vma->vm_page_prot))
-#endif
 			return -EAGAIN;
 		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
 			  " offset = 0x%lx\n",
@@ -638,10 +645,15 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 			  vma->vm_start, vma->vm_end, map->offset + offset);
 		vma->vm_ops = &drm_vm_ops;
 		break;
-	case _DRM_SHM:
 	case _DRM_CONSISTENT:
-		/* Consistent memory is really like shared memory. It's only
-		 * allocate in a different way */
+		/* Consistent memory is really like shared memory. But
+		 * it's allocated in a different way, so avoid nopage */
+		if (remap_pfn_range(vma, vma->vm_start,
+				    page_to_pfn(virt_to_page(map->handle)),
+				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
+			return -EAGAIN;
+		/* fall through to _DRM_SHM */
+	case _DRM_SHM:
 		vma->vm_ops = &drm_vm_shm_ops;
 		vma->vm_private_data = (void *)map;
 		/* Don't let this area swap.  Change when
@@ -659,8 +671,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
-	drm_vm_open(vma);
+	drm_vm_open_locked(vma);
 	return 0;
 }
+
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_mmap_locked(filp, vma);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL(drm_mmap);
drivers/char/drm/i915_drv.h

@@ -34,7 +34,8 @@
 #define IS_I965G(dev) (dev->pci_device == 0x2972 || \
 		       dev->pci_device == 0x2982 || \
 		       dev->pci_device == 0x2992 || \
-		       dev->pci_device == 0x29A2)
+		       dev->pci_device == 0x29A2 || \
+		       dev->pci_device == 0x2A02)
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
drivers/char/drm/radeon_cp.c

@@ -1560,8 +1560,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 	if (dev_priv->flags & RADEON_IS_AGP) {
 		base = dev->agp->base;
 		/* Check if valid */
-		if ((base + dev_priv->gart_size) > dev_priv->fb_location &&
-		    base < (dev_priv->fb_location + dev_priv->fb_size)) {
+		if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+		    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
 			DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
 				 dev->agp->base);
 			base = 0;
@@ -1571,8 +1571,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 	/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
 	if (base == 0) {
 		base = dev_priv->fb_location + dev_priv->fb_size;
-		if (((base + dev_priv->gart_size) & 0xfffffffful)
-		    < base)
+		if (base < dev_priv->fb_location ||
+		    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
 			base = dev_priv->fb_location
 				- dev_priv->gart_size;
 	}
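The u32 overflow being fixed is easiest to see with concrete numbers. A standalone sketch with illustrative sizes (a 128 MB framebuffer mapped at the top of the 4 GB card address space, plus a 128 MB GART):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t fb_location = 0xf8000000u;    /* assumed FB base in card space */
        uint32_t fb_size     = 0x08000000u;    /* 128 MB */
        uint32_t gart_size   = 0x08000000u;    /* 128 MB */

        uint32_t base = fb_location + fb_size;    /* wraps to 0x00000000 */
        printf("base = 0x%08x\n", base);

        /* Old test: only caught base + gart_size wrapping below base. */
        printf("old test fires: %d\n", base + gart_size < base);    /* 0 */

        /* New test: also catches base itself wrapping below the FB. */
        printf("new test fires: %d\n",
               base < fb_location || base + gart_size < base);      /* 1 */
        return 0;
    }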
drivers/char/drm/sis_drv.c

@@ -71,7 +71,7 @@ static struct drm_driver driver = {
 	.context_dtor = NULL,
 	.dma_quiescent = sis_idle,
 	.reclaim_buffers = NULL,
-	.reclaim_buffers_locked = sis_reclaim_buffers_locked,
+	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
 	.lastclose = sis_lastclose,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
drivers/char/drm/via_drv.c

@@ -52,7 +52,8 @@ static struct drm_driver driver = {
 	.dma_quiescent = via_driver_dma_quiescent,
 	.dri_library_name = dri_library_name,
 	.reclaim_buffers = drm_core_reclaim_buffers,
-	.reclaim_buffers_locked = via_reclaim_buffers_locked,
+	.reclaim_buffers_locked = NULL,
+	.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
 	.lastclose = via_lastclose,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
drivers/char/drm/via_mm.h (removed by this merge)

-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _via_drm_mm_h_
-#define _via_drm_mm_h_
-
-typedef struct {
-	unsigned int context;
-	unsigned int size;
-	unsigned long offset;
-	unsigned long free;
-} drm_via_mm_t;
-
-typedef struct {
-	unsigned int size;
-	unsigned long handle;
-	void *virtual;
-} drm_via_dma_t;
-
-#endif