Commit 3f4b5c5d authored by Linus Torvalds

Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (37 commits)
  drm/i915: fix modeset devname allocation + agp init return check.
  drm/i915: Remove redundant test in error path.
  drm: Add a debug node for vblank state.
  drm: Avoid use-before-null-test on dev in drm_cleanup().
  drm/i915: Don't print to dmesg when taking signal during object_pin.
  drm: pin new and unpin old buffer when setting a mode.
  drm/i915: un-EXPORT and make 'intelfb_panic' static
  drm/i915: Delete unused, pointless i915_driver_firstopen.
  drm/i915: fix sparse warnings: returning void-valued expression
  drm/i915: fix sparse warnings: move 'extern' decls to header file
  drm/i915: fix sparse warnings: make symbols static
  drm/i915: fix sparse warnings: declare one-bit bitfield as unsigned
  drm/i915: Don't double-unpin buffers if we take a signal in evict_everything().
  drm/i915: Fix fbcon setup to align display pitch to 64b.
  drm/i915: Add missing userland definitions for gem init/execbuffer.
  i915/drm: provide compat defines for userspace for certain struct members.
  drm: drop DRM_IOCTL_MODE_REPLACEFB, add+remove works just as well.
  drm: sanitise drm modesetting API + remove unused hotplug
  drm: fix allowing master ioctls on non-master fds.
  drm/radeon: use locked rmmap to remove sarea mapping.
  ...
parents a4ba2e9e aa596629
@@ -7,6 +7,8 @@
 menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
 	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
+	select I2C
+	select I2C_ALGOBIT
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
@@ -65,6 +67,10 @@ config DRM_I830
 	  will load the correct one.
 
 config DRM_I915
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	depends on FB
 	tristate "i915 driver"
 	help
 	  Choose this option if you have a system that has Intel 830M, 845G,
@@ -76,6 +82,17 @@ config DRM_I915
 
 endchoice
 
+config DRM_I915_KMS
+	bool "Enable modesetting on intel by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you want kernel modesetting enabled by default,
+	  and you have a new enough userspace to support this. Running old
+	  userspaces with this enabled will cause pain. Note that this causes
+	  the driver to bind to PCI devices, which precludes loading things
+	  like intelfb.
+
 config DRM_MGA
 	tristate "Matrox g200/g400"
 	depends on DRM
...
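The new DRM_I915_KMS option above only selects the default behaviour; whether modesetting is actually used is still decided when the i915 module loads. A minimal sketch of how such a Kconfig default is typically tied to a module parameter in i915_drv.c follows; the "modeset" parameter name, the -1/0/1 convention, and the surrounding "driver"/"i915_max_ioctl" definitions are assumptions for illustration, not part of this hunk.

/* Sketch only: assumes the struct drm_driver "driver" and i915_max_ioctl
 * defined elsewhere in i915_drv.c. -1 means "use the Kconfig default". */
static int i915_modeset = -1;
module_param_named(modeset, i915_modeset, int, 0400);

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

#if defined(CONFIG_DRM_I915_KMS)
	if (i915_modeset == -1)		/* no explicit request: honour DRM_I915_KMS */
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915_modeset == 1)		/* i915.modeset=1 forces KMS on */
		driver.driver_features |= DRIVER_MODESET;
	if (i915_modeset == 0)		/* i915.modeset=0 forces KMS off */
		driver.driver_features &= ~DRIVER_MODESET;

	return drm_init(&driver);
}
module_init(i915_init);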
@@ -9,7 +9,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
-		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
+		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+		drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
...
@@ -45,14 +45,15 @@
  * the one with matching magic number, while holding the drm_device::struct_mutex
  * lock.
  */
-static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
 {
 	struct drm_file *retval = NULL;
 	struct drm_magic_entry *pt;
 	struct drm_hash_item *hash;
+	struct drm_device *dev = master->minor->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+	if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
 		pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
 		retval = pt->priv;
 	}
@@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic
  * associated the magic number hash key in drm_device::magiclist, while holding
  * the drm_device::struct_mutex lock.
  */
-static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
 			 drm_magic_t magic)
 {
 	struct drm_magic_entry *entry;
+	struct drm_device *dev = master->minor->dev;
 
 	DRM_DEBUG("%d\n", magic);
 
 	entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
@@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
 		return -ENOMEM;
 	memset(entry, 0, sizeof(*entry));
 	entry->priv = priv;
-
 	entry->hash_item.key = (unsigned long)magic;
 	mutex_lock(&dev->struct_mutex);
-	drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
-	list_add_tail(&entry->head, &dev->magicfree);
+	drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+	list_add_tail(&entry->head, &master->magicfree);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
  * Searches and unlinks the entry in drm_device::magiclist with the magic
  * number hash key, while holding the drm_device::struct_mutex lock.
  */
-static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
+static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
 {
 	struct drm_magic_entry *pt;
 	struct drm_hash_item *hash;
+	struct drm_device *dev = master->minor->dev;
 
 	DRM_DEBUG("%d\n", magic);
 
 	mutex_lock(&dev->struct_mutex);
-	if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+	if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 	pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
-	drm_ht_remove_item(&dev->magiclist, hash);
+	drm_ht_remove_item(&master->magiclist, hash);
 	list_del(&pt->head);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 			++sequence;	/* reserve 0 */
 			auth->magic = sequence++;
 			spin_unlock(&lock);
-		} while (drm_find_file(dev, auth->magic));
+		} while (drm_find_file(file_priv->master, auth->magic));
 		file_priv->magic = auth->magic;
-		drm_add_magic(dev, file_priv, auth->magic);
+		drm_add_magic(file_priv->master, file_priv, auth->magic);
 	}
 
 	DRM_DEBUG("%u\n", auth->magic);
@@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data,
 	struct drm_file *file;
 
 	DRM_DEBUG("%u\n", auth->magic);
-	if ((file = drm_find_file(dev, auth->magic))) {
+	if ((file = drm_find_file(file_priv->master, auth->magic))) {
 		file->authenticated = 1;
-		drm_remove_magic(dev, auth->magic);
+		drm_remove_magic(file_priv->master, auth->magic);
 		return 0;
 	}
 	return -EINVAL;
...
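The drm_auth.c hunks above re-home the magic-number hash from the device onto the new per-master structure, and reach the device through master->minor->dev. A simplified sketch of the relationship these lookups rely on is shown below; the real definitions live in drmP.h and carry more fields (unique string, driver_priv, etc.) than this illustration.

/* Sketch only: simplified versions of the structures introduced by this
 * merge, reduced to the fields used by the drm_auth.c hunks above. */
struct drm_master {
	struct kref refcount;		/* shared by minor->master and each file_priv->master */
	struct drm_minor *minor;	/* back-pointer; master->minor->dev reaches the device */
	struct drm_open_hash magiclist;	/* was drm_device::magiclist */
	struct list_head magicfree;	/* was drm_device::magicfree */
	struct drm_lock_data lock;	/* was drm_device::lock */
};

struct drm_minor {
	struct drm_device *dev;
	struct drm_master *master;	/* current master of this minor, if any */
};

/* Each drm_file now carries its own reference:
 *	file_priv->master    - the master this fd authenticates against
 *	file_priv->is_master - set on the fd that created the master
 * so drm_getmagic()/drm_authmagic() operate on file_priv->master rather
 * than on device-global state. */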
@@ -54,9 +54,9 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 {
 	struct drm_map_list *entry;
 	list_for_each_entry(entry, &dev->maplist, head) {
-		if (entry->map && map->type == entry->map->type &&
+		if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
 		    ((entry->map->offset == map->offset) ||
-		     (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
+		     ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
 			return entry;
 		}
 	}
@@ -210,12 +210,12 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 		map->offset = (unsigned long)map->handle;
 		if (map->flags & _DRM_CONTAINS_LOCK) {
 			/* Prevent a 2nd X Server from creating a 2nd lock */
-			if (dev->lock.hw_lock != NULL) {
+			if (dev->primary->master->lock.hw_lock != NULL) {
 				vfree(map->handle);
 				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 				return -EBUSY;
 			}
-			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
+			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
 		}
 		break;
 	case _DRM_AGP: {
@@ -261,6 +261,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 		}
 		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
 
+		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
 		break;
 	}
 	case _DRM_SCATTER_GATHER:
@@ -319,6 +322,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 	list->user_token = list->hash.key << PAGE_SHIFT;
 	mutex_unlock(&dev->struct_mutex);
 
+	list->master = dev->primary->master;
 	*maplist = list;
 	return 0;
 }
@@ -345,7 +349,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_map_list *maplist;
 	int err;
 
-	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
+	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 		return -EPERM;
 
 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -380,10 +384,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 	struct drm_map_list *r_list = NULL, *list_t;
 	drm_dma_handle_t dmah;
 	int found = 0;
+	struct drm_master *master;
 
 	/* Find the list entry for the map and remove it */
 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
 		if (r_list->map == map) {
+			master = r_list->master;
 			list_del(&r_list->head);
 			drm_ht_remove_key(&dev->map_hash,
 					  r_list->user_token >> PAGE_SHIFT);
@@ -409,6 +415,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		break;
 	case _DRM_SHM:
 		vfree(map->handle);
+		if (master) {
+			if (dev->sigdata.lock == master->lock.hw_lock)
+				dev->sigdata.lock = NULL;
+			master->lock.hw_lock = NULL;	/* SHM removed */
+			master->lock.file_priv = NULL;
+			wake_up_interruptible(&master->lock.lock_queue);
+		}
 		break;
 	case _DRM_AGP:
 	case _DRM_SCATTER_GATHER:
@@ -419,11 +432,15 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_rmmap_locked);
 
 int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
 {
...
@@ -256,12 +256,13 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
  * hardware lock is held, clears the drm_device::context_flag and wakes up
  * drm_device::context_wait.
  */
-static int drm_context_switch_complete(struct drm_device * dev, int new)
+static int drm_context_switch_complete(struct drm_device *dev,
+				       struct drm_file *file_priv, int new)
 {
 	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
 	dev->last_switch = jiffies;
 
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
 		DRM_ERROR("Lock isn't held after context switch\n");
 	}
 
@@ -420,7 +421,7 @@ int drm_newctx(struct drm_device *dev, void *data,
 	struct drm_ctx *ctx = data;
 
 	DRM_DEBUG("%d\n", ctx->handle);
-	drm_context_switch_complete(dev, ctx->handle);
+	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
 	return 0;
 }
@@ -442,9 +443,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 	struct drm_ctx *ctx = data;
 
 	DRM_DEBUG("%d\n", ctx->handle);
-	if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
-		file_priv->remove_auth_on_close = 1;
-	}
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_dtor)
 			dev->driver->context_dtor(dev, ctx->handle);
...
@@ -74,6 +74,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
 
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -123,6 +126,23 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
@@ -138,8 +158,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
  */
 int drm_lastclose(struct drm_device * dev)
 {
-	struct drm_magic_entry *pt, *next;
-	struct drm_map_list *r_list, *list_t;
 	struct drm_vma_entry *vma, *vma_temp;
 	int i;
 
@@ -149,13 +167,7 @@ int drm_lastclose(struct drm_device * dev)
 		dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");
 
-	if (dev->unique) {
-		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
-		dev->unique = NULL;
-		dev->unique_len = 0;
-	}
-
-	if (dev->irq_enabled)
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
@@ -164,18 +176,9 @@ int drm_lastclose(struct drm_device * dev)
 	drm_drawable_free_all(dev);
 	del_timer(&dev->timer);
 
-	/* Clear pid list */
-	if (dev->magicfree.next) {
-		list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
-			list_del(&pt->head);
-			drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
-			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
-		}
-		drm_ht_remove(&dev->magiclist);
-	}
-
 	/* Clear AGP information */
-	if (drm_core_has_AGP(dev) && dev->agp) {
+	if (drm_core_has_AGP(dev) && dev->agp &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
 		struct drm_agp_mem *entry, *tempe;
 
 		/* Remove AGP resources, but leave dev->agp
@@ -194,7 +197,8 @@ int drm_lastclose(struct drm_device * dev)
 		dev->agp->acquired = 0;
 		dev->agp->enabled = 0;
 	}
-	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
 		drm_sg_cleanup(dev->sg);
 		dev->sg = NULL;
 	}
@@ -205,13 +209,6 @@ int drm_lastclose(struct drm_device * dev)
 		drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
 	}
 
-	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
-		if (!(r_list->map->flags & _DRM_DRIVER)) {
-			drm_rmmap_locked(dev, r_list->map);
-			r_list = NULL;
-		}
-	}
-
 	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
 		for (i = 0; i < dev->queue_count; i++) {
 			if (dev->queuelist[i]) {
@@ -228,14 +225,11 @@ int drm_lastclose(struct drm_device * dev)
 	}
 	dev->queue_count = 0;
 
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_dma_takedown(dev);
 
-	if (dev->lock.hw_lock) {
-		dev->sigdata.lock = dev->lock.hw_lock = NULL;	/* SHM removed */
-		dev->lock.file_priv = NULL;
-		wake_up_interruptible(&dev->lock.lock_queue);
-	}
+	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("lastclose completed\n");
@@ -263,6 +257,8 @@ int drm_init(struct drm_driver *driver)
 
 	DRM_DEBUG("\n");
 
+	INIT_LIST_HEAD(&driver->device_list);
+
 	for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
 		pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
 
@@ -329,35 +325,24 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ht_remove(&dev->map_hash);
 	drm_ctxbitmap_cleanup(dev);
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_put_minor(&dev->control);
+
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
 	drm_put_minor(&dev->primary);
 	if (drm_put_dev(dev))
 		DRM_ERROR("Cannot unload module\n");
 }
 
-static int drm_minors_cleanup(int id, void *ptr, void *data)
-{
-	struct drm_minor *minor = ptr;
-	struct drm_device *dev;
-	struct drm_driver *driver = data;
-
-	dev = minor->dev;
-	if (minor->dev->driver != driver)
-		return 0;
-
-	if (minor->type != DRM_MINOR_LEGACY)
-		return 0;
-
-	if (dev)
-		pci_dev_put(dev->pdev);
-
-	drm_cleanup(dev);
-	return 1;
-}
-
 void drm_exit(struct drm_driver *driver)
 {
+	struct drm_device *dev, *tmp;
 	DRM_DEBUG("\n");
 
-	idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
+	list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+		drm_cleanup(dev);
 
 	DRM_INFO("Module unloaded\n");
 }
@@ -503,7 +488,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 		retcode = -EINVAL;
 	} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
 		   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
-		   ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
+		   ((ioctl->flags & DRM_MASTER) && !file_priv->is_master)) {
 		retcode = -EACCES;
 	} else {
 		if (cmd & (IOC_IN | IOC_OUT)) {
...
@@ -35,7 +35,6 @@
  */
 
 #include "drmP.h"
-#include "drm_sarea.h"
 #include <linux/poll.h>
 #include <linux/smp_lock.h>
 
@@ -44,10 +43,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 static int drm_setup(struct drm_device * dev)
 {
-	drm_local_map_t *map;
 	int i;
 	int ret;
-	u32 sareapage;
 
 	if (dev->driver->firstopen) {
 		ret = dev->driver->firstopen(dev);
@@ -55,20 +52,14 @@ static int drm_setup(struct drm_device * dev)
 			return ret;
 	}
 
-	dev->magicfree.next = NULL;
-
-	/* prebuild the SAREA */
-	sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
-	i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
-	if (i != 0)
-		return i;
-
 	atomic_set(&dev->ioctl_count, 0);
 	atomic_set(&dev->vma_count, 0);
-	dev->buf_use = 0;
-	atomic_set(&dev->buf_alloc, 0);
 
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		dev->buf_use = 0;
+		atomic_set(&dev->buf_alloc, 0);
+
 		i = drm_dma_setup(dev);
 		if (i < 0)
 			return i;
@@ -77,16 +68,12 @@ static int drm_setup(struct drm_device * dev)
 	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
 		atomic_set(&dev->counts[i], 0);
 
-	drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
-	INIT_LIST_HEAD(&dev->magicfree);
-
 	dev->sigdata.lock = NULL;
-	init_waitqueue_head(&dev->lock.lock_queue);
+
 	dev->queue_count = 0;
 	dev->queue_reserved = 0;
 	dev->queue_slots = 0;
 	dev->queuelist = NULL;
-	dev->irq_enabled = 0;
 	dev->context_flag = 0;
 	dev->interrupt_flag = 0;
 	dev->dma_flag = 0;
@@ -147,10 +134,20 @@ int drm_open(struct inode *inode, struct file *filp)
 		spin_lock(&dev->count_lock);
 		if (!dev->open_count++) {
 			spin_unlock(&dev->count_lock);
-			return drm_setup(dev);
+			retcode = drm_setup(dev);
+			goto out;
 		}
 		spin_unlock(&dev->count_lock);
 	}
 
+out:
+	mutex_lock(&dev->struct_mutex);
+	if (minor->type == DRM_MINOR_LEGACY) {
+		BUG_ON((dev->dev_mapping != NULL) &&
+		       (dev->dev_mapping != inode->i_mapping));
+		if (dev->dev_mapping == NULL)
+			dev->dev_mapping = inode->i_mapping;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
 	return retcode;
 }
 
@@ -255,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	priv->lock_count = 0;
 
 	INIT_LIST_HEAD(&priv->lhead);
+	INIT_LIST_HEAD(&priv->fbs);
 
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_open(dev, priv);
@@ -265,10 +263,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 			goto out_free;
 	}
 
+	/* if there is no current master make this fd it */
 	mutex_lock(&dev->struct_mutex);
-	if (list_empty(&dev->filelist))
-		priv->master = 1;
+	if (!priv->minor->master) {
+		/* create a new master */
+		priv->minor->master = drm_master_create(priv->minor);
+		if (!priv->minor->master) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
 
+		priv->is_master = 1;
+		/* take another reference for the copy in the local file priv */
+		priv->master = drm_master_get(priv->minor->master);
+
+		priv->authenticated = 1;
+
+		mutex_unlock(&dev->struct_mutex);
+		if (dev->driver->master_create) {
+			ret = dev->driver->master_create(dev, priv->master);
+			if (ret) {
+				mutex_lock(&dev->struct_mutex);
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				mutex_unlock(&dev->struct_mutex);
+				goto out_free;
+			}
+		}
+	} else {
+		/* get a reference to the master */
+		priv->master = drm_master_get(priv->minor->master);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	mutex_lock(&dev->struct_mutex);
 	list_add(&priv->lhead, &dev->filelist);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -314,6 +344,74 @@ int drm_fasync(int fd, struct file *filp, int on)
 }
 EXPORT_SYMBOL(drm_fasync);
 
+/*
+ * Reclaim locked buffers; note that this may be a bad idea if the current
+ * context doesn't have the hw lock...
+ */
+static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
+{
+	struct drm_file *file_priv = f->private_data;
+
+	if (drm_i_have_hw_lock(dev, file_priv)) {
+		dev->driver->reclaim_buffers_locked(dev, file_priv);
+	} else {
+		unsigned long _end = jiffies + 3 * DRM_HZ;
+		int locked = 0;
+
+		drm_idlelock_take(&file_priv->master->lock);
+
+		/*
+		 * Wait for a while.
+		 */
+		do {
+			spin_lock_bh(&file_priv->master->lock.spinlock);
+			locked = file_priv->master->lock.idle_has_lock;
+			spin_unlock_bh(&file_priv->master->lock.spinlock);
+			if (locked)
+				break;
+			schedule();
+		} while (!time_after_eq(jiffies, _end));
+
+		if (!locked) {
+			DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+				  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+				  "\tI will go on reclaiming the buffers anyway.\n");
+		}
+
+		dev->driver->reclaim_buffers_locked(dev, file_priv);
+		drm_idlelock_release(&file_priv->master->lock);
+	}
+}
+
+static void drm_master_release(struct drm_device *dev, struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+
+	if (dev->driver->reclaim_buffers_locked &&
+	    file_priv->master->lock.hw_lock)
+		drm_reclaim_locked_buffers(dev, filp);
+
+	if (dev->driver->reclaim_buffers_idlelocked &&
+	    file_priv->master->lock.hw_lock) {
+		drm_idlelock_take(&file_priv->master->lock);
+		dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+		drm_idlelock_release(&file_priv->master->lock);
+	}
+
+	if (drm_i_have_hw_lock(dev, file_priv)) {
+		DRM_DEBUG("File %p released, freeing lock for context %d\n",
+			  filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+		drm_lock_free(&file_priv->master->lock,
+			      _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !dev->driver->reclaim_buffers_locked) {
+		dev->driver->reclaim_buffers(dev, file_priv);
+	}
+}
+
 /**
  * Release file.
  *
@@ -348,60 +446,9 @@ int drm_release(struct inode *inode, struct file *filp)
 		  (long)old_encode_dev(file_priv->minor->device),
 		  dev->open_count);
 
-	if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
-		if (drm_i_have_hw_lock(dev, file_priv)) {
-			dev->driver->reclaim_buffers_locked(dev, file_priv);
-		} else {
-			unsigned long endtime = jiffies + 3 * DRM_HZ;
-			int locked = 0;
-
-			drm_idlelock_take(&dev->lock);
-
-			/*
-			 * Wait for a while.
-			 */
-			do{
-				spin_lock_bh(&dev->lock.spinlock);
-				locked = dev->lock.idle_has_lock;
-				spin_unlock_bh(&dev->lock.spinlock);
-				if (locked)
-					break;
-				schedule();
-			} while (!time_after_eq(jiffies, endtime));
-
-			if (!locked) {
-				DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
-					  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
-					  "\tI will go on reclaiming the buffers anyway.\n");
-			}
-
-			dev->driver->reclaim_buffers_locked(dev, file_priv);
-			drm_idlelock_release(&dev->lock);
-		}
-	}
-
-	if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
-		drm_idlelock_take(&dev->lock);
-		dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
-		drm_idlelock_release(&dev->lock);
-	}
-
-	if (drm_i_have_hw_lock(dev, file_priv)) {
-		DRM_DEBUG("File %p released, freeing lock for context %d\n",
-			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-		drm_lock_free(&dev->lock,
-			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
-	    !dev->driver->reclaim_buffers_locked) {
-		dev->driver->reclaim_buffers(dev, file_priv);
-	}
+	/* if the master has gone away we can't do anything with the lock */
+	if (file_priv->minor->master)
+		drm_master_release(dev, filp);
 
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
@@ -428,12 +475,24 @@ int drm_release(struct inode *inode, struct file *filp)
 	mutex_unlock(&dev->ctxlist_mutex);
 
 	mutex_lock(&dev->struct_mutex);
-	if (file_priv->remove_auth_on_close == 1) {
+
+	if (file_priv->is_master) {
 		struct drm_file *temp;
+		list_for_each_entry(temp, &dev->filelist, lhead) {
+			if ((temp->master == file_priv->master) &&
+			    (temp != file_priv))
+				temp->authenticated = 0;
+		}
 
-		list_for_each_entry(temp, &dev->filelist, lhead)
-			temp->authenticated = 0;
+		if (file_priv->minor->master == file_priv->master) {
+			/* drop the reference held my the minor */
+			drm_master_put(&file_priv->minor->master);
+		}
 	}
+
+	/* drop the reference held my the file priv */
+	drm_master_put(&file_priv->master);
+	file_priv->is_master = 0;
 	list_del(&file_priv->lhead);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -448,9 +507,9 @@ int drm_release(struct inode *inode, struct file *filp)
 	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
 	spin_lock(&dev->count_lock);
 	if (!--dev->open_count) {
-		if (atomic_read(&dev->ioctl_count) || dev->blocked) {
-			DRM_ERROR("Device busy: %d %d\n",
-				  atomic_read(&dev->ioctl_count), dev->blocked);
+		if (atomic_read(&dev->ioctl_count)) {
+			DRM_ERROR("Device busy: %d\n",
+				  atomic_read(&dev->ioctl_count));
 			spin_unlock(&dev->count_lock);
 			unlock_kernel();
 			return -EBUSY;
...
@@ -64,6 +64,13 @@
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+
 /**
  * Initialize the GEM device fields
  */
@@ -71,6 +78,8 @@
 int
 drm_gem_init(struct drm_device *dev)
 {
+	struct drm_gem_mm *mm;
+
 	spin_lock_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 	atomic_set(&dev->object_count, 0);
@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev)
 	atomic_set(&dev->pin_memory, 0);
 	atomic_set(&dev->gtt_count, 0);
 	atomic_set(&dev->gtt_memory, 0);
+
+	mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	if (!mm) {
+		DRM_ERROR("out of memory\n");
+		return -ENOMEM;
+	}
+
+	dev->mm_private = mm;
+
+	if (drm_ht_create(&mm->offset_hash, 19)) {
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		return -ENOMEM;
+	}
+
+	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+			DRM_FILE_PAGE_OFFSET_SIZE)) {
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		drm_ht_remove(&mm->offset_hash);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+	struct drm_gem_mm *mm = dev->mm_private;
+
+	drm_mm_takedown(&mm->offset_manager);
+	drm_ht_remove(&mm->offset_hash);
+
+	drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	dev->mm_private = NULL;
+}
+
 /**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
@@ -419,3 +460,73 @@ drm_gem_object_handle_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
 
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map *map = NULL;
+	struct drm_gem_object *obj;
+	struct drm_hash_item *hash;
+	unsigned long prot;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return drm_mmap(filp, vma);
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map ||
+	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+		ret = -EPERM;
+		goto out_unlock;
+	}
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	obj = map->handle;
+	if (!obj->dev->driver->gem_vm_ops) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+	vma->vm_ops = obj->dev->driver->gem_vm_ops;
+	vma->vm_private_data = map->handle;
+	/* FIXME: use pgprot_writecombine when available */
+	prot = pgprot_val(vma->vm_page_prot);
+#ifdef CONFIG_X86
+	prot |= _PAGE_CACHE_WC;
+#endif
+	vma->vm_page_prot = __pgprot(prot);
+
+	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+	drm_vm_open_locked(vma);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
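drm_gem_mmap() above is only the kernel half of the fake-offset scheme: a driver ioctl hands userspace an offset, and a subsequent mmap() on the DRM fd is routed back through mm->offset_hash to the right object. A sketch of the matching userspace call sequence follows, assuming the i915 GTT-mapping ioctl of this era; the struct layout and ioctl number are spelled out here for illustration (they normally come from i915_drm.h) and are assumptions, not part of this hunk.

/* Sketch only: userspace side of the fake mmap offset, assumed i915 ioctl. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

struct drm_i915_gem_mmap_gtt {
	uint32_t handle;	/* GEM object handle */
	uint32_t pad;
	uint64_t offset;	/* fake mmap offset filled in by the kernel */
};
#define DRM_IOCTL_I915_GEM_MMAP_GTT \
	_IOWR('d', 0x64, struct drm_i915_gem_mmap_gtt)	/* assumed number */

static void *map_gem_object_gtt(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* Ask the driver to allocate a fake offset for this object... */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* ...then mmap the DRM fd at that offset; drm_gem_mmap() finds the
	 * object in mm->offset_hash and installs the driver's gem_vm_ops. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}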
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_insert_item);
 
 /*
  * Just insert an item and return any "bits" bit key that hasn't been
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	ht->fill--;
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_remove_item);
 
 void drm_ht_remove(struct drm_open_hash *ht)
 {
...
@@ -52,6 +52,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	DECLARE_WAITQUEUE(entry, current);
 	struct drm_lock *lock = data;
+	struct drm_master *master = file_priv->master;
 	int ret = 0;
 
 	++file_priv->lock_count;
@@ -64,26 +65,27 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 
 	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
 		  lock->context, task_pid_nr(current),
-		  dev->lock.hw_lock->lock, lock->flags);
+		  master->lock.hw_lock->lock, lock->flags);
 
 	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
 		if (lock->context < 0)
 			return -EINVAL;
 
-	add_wait_queue(&dev->lock.lock_queue, &entry);
-	spin_lock_bh(&dev->lock.spinlock);
-	dev->lock.user_waiters++;
-	spin_unlock_bh(&dev->lock.spinlock);
+	add_wait_queue(&master->lock.lock_queue, &entry);
+	spin_lock_bh(&master->lock.spinlock);
+	master->lock.user_waiters++;
+	spin_unlock_bh(&master->lock.spinlock);
+
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
-		if (!dev->lock.hw_lock) {
+		if (!master->lock.hw_lock) {
 			/* Device has been unregistered */
 			ret = -EINTR;
 			break;
 		}
-		if (drm_lock_take(&dev->lock, lock->context)) {
-			dev->lock.file_priv = file_priv;
-			dev->lock.lock_time = jiffies;
+		if (drm_lock_take(&master->lock, lock->context)) {
+			master->lock.file_priv = file_priv;
+			master->lock.lock_time = jiffies;
 			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
 			break;	/* Got lock */
 		}
@@ -95,11 +97,11 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 			break;
 		}
 	}
-	spin_lock_bh(&dev->lock.spinlock);
-	dev->lock.user_waiters--;
-	spin_unlock_bh(&dev->lock.spinlock);
+	spin_lock_bh(&master->lock.spinlock);
+	master->lock.user_waiters--;
+	spin_unlock_bh(&master->lock.spinlock);
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&dev->lock.lock_queue, &entry);
+	remove_wait_queue(&master->lock.lock_queue, &entry);
 
 	DRM_DEBUG("%d %s\n", lock->context,
 		  ret ? "interrupted" : "has lock");
@@ -108,14 +110,14 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	/* don't set the block all signals on the master process for now
 	 * really probably not the correct answer but lets us debug xkb
 	 * xserver for now */
-	if (!file_priv->master) {
+	if (!file_priv->is_master) {
 		sigemptyset(&dev->sigmask);
 		sigaddset(&dev->sigmask, SIGSTOP);
 		sigaddset(&dev->sigmask, SIGTSTP);
 		sigaddset(&dev->sigmask, SIGTTIN);
 		sigaddset(&dev->sigmask, SIGTTOU);
 		dev->sigdata.context = lock->context;
-		dev->sigdata.lock = dev->lock.hw_lock;
+		dev->sigdata.lock = master->lock.hw_lock;
 		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
 	}
 
@@ -154,6 +156,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_lock *lock = data;
+	struct drm_master *master = file_priv->master;
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
@@ -169,7 +172,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	if (dev->driver->kernel_context_switch_unlock)
 		dev->driver->kernel_context_switch_unlock(dev);
 	else {
-		if (drm_lock_free(&dev->lock,lock->context)) {
+		if (drm_lock_free(&master->lock, lock->context)) {
 			/* FIXME: Should really bail out here. */
 		}
 	}
@@ -379,9 +382,10 @@ EXPORT_SYMBOL(drm_idlelock_release);
 
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
-	return (file_priv->lock_count && dev->lock.hw_lock &&
-		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
-		dev->lock.file_priv == file_priv);
+	struct drm_master *master = file_priv->master;
+	return (file_priv->lock_count && master->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+		master->lock.file_priv == file_priv);
 }
 
 EXPORT_SYMBOL(drm_i_have_hw_lock);
@@ -296,3 +296,4 @@ void drm_mm_takedown(struct drm_mm * mm)
 
 	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
 }
+EXPORT_SYMBOL(drm_mm_takedown);
@@ -208,6 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		}
 	}
 	obj_priv->tiling_mode = args->tiling_mode;
+	obj_priv->stride = args->stride;
 
 	mutex_unlock(&dev->struct_mutex);
...