Commit 3f4b5c5d authored by Linus Torvalds

Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (37 commits)
  drm/i915: fix modeset devname allocation + agp init return check.
  drm/i915: Remove redundant test in error path.
  drm: Add a debug node for vblank state.
  drm: Avoid use-before-null-test on dev in drm_cleanup().
  drm/i915: Don't print to dmesg when taking signal during object_pin.
  drm: pin new and unpin old buffer when setting a mode.
  drm/i915: un-EXPORT and make 'intelfb_panic' static
  drm/i915: Delete unused, pointless i915_driver_firstopen.
  drm/i915: fix sparse warnings: returning void-valued expression
  drm/i915: fix sparse warnings: move 'extern' decls to header file
  drm/i915: fix sparse warnings: make symbols static
  drm/i915: fix sparse warnings: declare one-bit bitfield as unsigned
  drm/i915: Don't double-unpin buffers if we take a signal in evict_everything().
  drm/i915: Fix fbcon setup to align display pitch to 64b.
  drm/i915: Add missing userland definitions for gem init/execbuffer.
  i915/drm: provide compat defines for userspace for certain struct members.
  drm: drop DRM_IOCTL_MODE_REPLACEFB, add+remove works just as well.
  drm: sanitise drm modesetting API + remove unused hotplug
  drm: fix allowing master ioctls on non-master fds.
  drm/radeon: use locked rmmap to remove sarea mapping.
  ...
parents a4ba2e9e aa596629
......@@ -7,6 +7,8 @@
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
......@@ -65,6 +67,10 @@ config DRM_I830
will load the correct one.
config DRM_I915
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
depends on FB
tristate "i915 driver"
help
Choose this option if you have a system that has Intel 830M, 845G,
......@@ -76,6 +82,17 @@ config DRM_I915
endchoice
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
depends on DRM_I915
help
Choose this option if you want kernel modesetting enabled by default,
and you have a new enough userspace to support this. Running old
userspaces with this enabled will cause pain. Note that this causes
the driver to bind to PCI devices, which precludes loading things
like intelfb.
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM
......
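As a side note on the new DRM_I915_KMS option above: the usual pattern is that the driver also exposes a modeset module parameter, and kernel modesetting is only advertised (via DRIVER_MODESET) when either the Kconfig default or an explicit modeset=1 asks for it. The sketch below illustrates that pattern; the parameter name, driver_features values and drm_init() call follow this tree's conventions, but the snippet is not quoted from this merge.

/* Illustrative sketch, not taken verbatim from this merge: honouring a
 * KMS-by-default Kconfig symbol while still allowing modeset=0/1 to
 * override it at module load time. */
#include <linux/module.h>
#include "drmP.h"

static struct drm_driver driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_GEM,
	/* fops, ioctl table, etc. omitted for brevity */
};

static int modeset = -1;		/* -1: follow the Kconfig default */
module_param(modeset, int, 0400);

static int __init example_init(void)
{
#if defined(CONFIG_DRM_I915_KMS)
	if (modeset != 0)		/* KMS on by default unless modeset=0 */
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (modeset == 1)		/* explicit modeset=1 always enables KMS */
		driver.driver_features |= DRIVER_MODESET;

	return drm_init(&driver);
}
module_init(example_init);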
......@@ -9,7 +9,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
......
......@@ -45,14 +45,15 @@
* the one with matching magic number, while holding the drm_device::struct_mutex
* lock.
*/
static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
{
struct drm_file *retval = NULL;
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
struct drm_device *dev = master->minor->dev;
mutex_lock(&dev->struct_mutex);
if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
retval = pt->priv;
}
......@@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic
* associated the magic number hash key in drm_device::magiclist, while holding
* the drm_device::struct_mutex lock.
*/
static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
drm_magic_t magic)
{
struct drm_magic_entry *entry;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
......@@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
list_add_tail(&entry->head, &dev->magicfree);
drm_ht_insert_item(&master->magiclist, &entry->hash_item);
list_add_tail(&entry->head, &master->magicfree);
mutex_unlock(&dev->struct_mutex);
return 0;
......@@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
mutex_lock(&dev->struct_mutex);
if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
drm_ht_remove_item(&dev->magiclist, hash);
drm_ht_remove_item(&master->magiclist, hash);
list_del(&pt->head);
mutex_unlock(&dev->struct_mutex);
......@@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
++sequence; /* reserve 0 */
auth->magic = sequence++;
spin_unlock(&lock);
} while (drm_find_file(dev, auth->magic));
} while (drm_find_file(file_priv->master, auth->magic));
file_priv->magic = auth->magic;
drm_add_magic(dev, file_priv, auth->magic);
drm_add_magic(file_priv->master, file_priv, auth->magic);
}
DRM_DEBUG("%u\n", auth->magic);
......@@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file;
DRM_DEBUG("%u\n", auth->magic);
if ((file = drm_find_file(dev, auth->magic))) {
if ((file = drm_find_file(file_priv->master, auth->magic))) {
file->authenticated = 1;
drm_remove_magic(dev, auth->magic);
drm_remove_magic(file_priv->master, auth->magic);
return 0;
}
return -EINVAL;
......
......@@ -54,9 +54,9 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
return entry;
}
}
......@@ -210,12 +210,12 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
if (dev->lock.hw_lock != NULL) {
if (dev->primary->master->lock.hw_lock != NULL) {
vfree(map->handle);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EBUSY;
}
dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
case _DRM_AGP: {
......@@ -261,6 +261,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
}
DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
break;
case _DRM_GEM:
DRM_ERROR("tried to rmmap GEM object\n");
break;
}
case _DRM_SCATTER_GATHER:
......@@ -319,6 +322,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
list->user_token = list->hash.key << PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
list->master = dev->primary->master;
*maplist = list;
return 0;
}
......@@ -345,7 +349,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_map_list *maplist;
int err;
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
......@@ -380,10 +384,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
struct drm_map_list *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
int found = 0;
struct drm_master *master;
/* Find the list entry for the map and remove it */
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
master = r_list->master;
list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
......@@ -409,6 +415,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
break;
case _DRM_SHM:
vfree(map->handle);
if (master) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
master->lock.hw_lock = NULL; /* SHM removed */
master->lock.file_priv = NULL;
wake_up_interruptible(&master->lock.lock_queue);
}
break;
case _DRM_AGP:
case _DRM_SCATTER_GATHER:
......@@ -419,11 +432,15 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
case _DRM_GEM:
DRM_ERROR("tried to rmmap GEM object\n");
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
......
......@@ -256,12 +256,13 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
static int drm_context_switch_complete(struct drm_device * dev, int new)
static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
......@@ -420,7 +421,7 @@ int drm_newctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
return 0;
}
......@@ -442,9 +443,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
file_priv->remove_auth_on_close = 1;
}
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
......
......@@ -74,6 +74,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
......@@ -123,6 +126,23 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
......@@ -138,8 +158,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
*/
int drm_lastclose(struct drm_device * dev)
{
struct drm_magic_entry *pt, *next;
struct drm_map_list *r_list, *list_t;
struct drm_vma_entry *vma, *vma_temp;
int i;
......@@ -149,13 +167,7 @@ int drm_lastclose(struct drm_device * dev)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
if (dev->irq_enabled)
if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
......@@ -164,18 +176,9 @@ int drm_lastclose(struct drm_device * dev)
drm_drawable_free_all(dev);
del_timer(&dev->timer);
/* Clear pid list */
if (dev->magicfree.next) {
list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
drm_ht_remove(&dev->magiclist);
}
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
if (drm_core_has_AGP(dev) && dev->agp &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
......@@ -194,7 +197,8 @@ int drm_lastclose(struct drm_device * dev)
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
......@@ -205,13 +209,6 @@ int drm_lastclose(struct drm_device * dev)
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (!(r_list->map->flags & _DRM_DRIVER)) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
for (i = 0; i < dev->queue_count; i++) {
if (dev->queuelist[i]) {
......@@ -228,14 +225,11 @@ int drm_lastclose(struct drm_device * dev)
}
dev->queue_count = 0;
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!drm_core_check_feature(dev, DRIVER_MODESET))
drm_dma_takedown(dev);
if (dev->lock.hw_lock) {
dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.file_priv = NULL;
wake_up_interruptible(&dev->lock.lock_queue);
}
dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("lastclose completed\n");
......@@ -263,6 +257,8 @@ int drm_init(struct drm_driver *driver)
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
......@@ -329,35 +325,24 @@ static void drm_cleanup(struct drm_device * dev)
drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
drm_put_minor(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}
static int drm_minors_cleanup(int id, void *ptr, void *data)
{
struct drm_minor *minor = ptr;
struct drm_device *dev;
struct drm_driver *driver = data;
dev = minor->dev;
if (minor->dev->driver != driver)
return 0;
if (minor->type != DRM_MINOR_LEGACY)
return 0;
if (dev)
pci_dev_put(dev->pdev);
drm_cleanup(dev);
return 1;
}
void drm_exit(struct drm_driver *driver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_cleanup(dev);
DRM_INFO("Module unloaded\n");
}
......@@ -503,7 +488,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
retcode = -EINVAL;
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
((ioctl->flags & DRM_MASTER) && !file_priv->is_master)) {
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
......
......@@ -35,7 +35,6 @@
*/
#include "drmP.h"
#include "drm_sarea.h"
#include <linux/poll.h>
#include <linux/smp_lock.h>
......@@ -44,10 +43,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
static int drm_setup(struct drm_device * dev)
{
drm_local_map_t *map;
int i;
int ret;
u32 sareapage;
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
......@@ -55,20 +52,14 @@ static int drm_setup(struct drm_device * dev)
return ret;
}
dev->magicfree.next = NULL;
/* prebuild the SAREA */
sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
i = drm_dma_setup(dev);
if (i < 0)
return i;
......@@ -77,16 +68,12 @@ static int drm_setup(struct drm_device * dev)
for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&dev->magicfree);
dev->sigdata.lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq_enabled = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
......@@ -147,10 +134,20 @@ int drm_open(struct inode *inode, struct file *filp)
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return drm_setup(dev);
retcode = drm_setup(dev);
goto out;
}
spin_unlock(&dev->count_lock);
}
out:
mutex_lock(&dev->struct_mutex);
if (minor->type == DRM_MINOR_LEGACY) {
BUG_ON((dev->dev_mapping != NULL) &&
(dev->dev_mapping != inode->i_mapping));
if (dev->dev_mapping == NULL)
dev->dev_mapping = inode->i_mapping;
}
mutex_unlock(&dev->struct_mutex);
return retcode;
}
......@@ -255,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
......@@ -265,10 +263,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
/* if there is no current master make this fd it */
mutex_lock(&dev->struct_mutex);
if (!priv->minor->master) {
/* create a new master */
priv->minor->master = drm_master_create(priv->minor);
if (!priv->minor->master) {
ret = -ENOMEM;
goto out_free;
}
priv->is_master = 1;
/* take another reference for the copy in the local file priv */
priv->master = drm_master_get(priv->minor->master);
priv->authenticated = 1;
mutex_unlock(&dev->struct_mutex);
if (dev->driver->master_create) {
ret = dev->driver->master_create(dev, priv->master);
if (ret) {
mutex_lock(&dev->struct_mutex);
if (list_empty(&dev->filelist))
priv->master = 1;
/* drop both references if this fails */
drm_master_put(&priv->minor->master);
drm_master_put(&priv->master);
mutex_unlock(&dev->struct_mutex);
goto out_free;
}
}
} else {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
mutex_unlock(&dev->struct_mutex);
}
mutex_lock(&dev->struct_mutex);
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->struct_mutex);
......@@ -314,61 +344,33 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
/**
* Release file.
*
* \param inode device inode
* \param file_priv DRM file private.
* \return zero on success or a negative number on failure.
*
* If the hardware lock is held then free it, and take it again for the kernel
* context since it's necessary to reclaim buffers. Unlink the file private
* data from its list and free it. Decreases the open count and if it reaches
* zero calls drm_lastclose().
/*
* Reclaim locked buffers; note that this may be a bad idea if the current
* context doesn't have the hw lock...
*/
int drm_release(struct inode *inode, struct file *filp)
static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
int retcode = 0;
lock_kernel();
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
/* ========================================================
* Begin inline drm_release
*/
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->device),
dev->open_count);
struct drm_file *file_priv = f->private_data;
if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
if (drm_i_have_hw_lock(dev, file_priv)) {
dev->driver->reclaim_buffers_locked(dev, file_priv);
} else {
unsigned long endtime = jiffies + 3 * DRM_HZ;
unsigned long _end = jiffies + 3 * DRM_HZ;
int locked = 0;
drm_idlelock_take(&dev->lock);
drm_idlelock_take(&file_priv->master->lock);
/*
* Wait for a while.
*/
do{
spin_lock_bh(&dev->lock.spinlock);
locked = dev->lock.idle_has_lock;
spin_unlock_bh(&dev->lock.spinlock);
do {
spin_lock_bh(&file_priv->master->lock.spinlock);
locked = file_priv->master->lock.idle_has_lock;
spin_unlock_bh(&file_priv->master->lock.spinlock);
if (locked)
break;
schedule();
} while (!time_after_eq(jiffies, endtime));
} while (!time_after_eq(jiffies, _end));
if (!locked) {
DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
......@@ -377,31 +379,76 @@ int drm_release(struct inode *inode, struct file *filp)
}
dev->driver->reclaim_buffers_locked(dev, file_priv);
drm_idlelock_release(&dev->lock);
}
drm_idlelock_release(&file_priv->master->lock);
}
}
if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
static void drm_master_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
drm_idlelock_take(&dev->lock);
dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
drm_idlelock_release(&dev->lock);
if (dev->driver->reclaim_buffers_locked &&
file_priv->master->lock.hw_lock)
drm_reclaim_locked_buffers(dev, filp);
if (dev->driver->reclaim_buffers_idlelocked &&
file_priv->master->lock.hw_lock) {
drm_idlelock_take(&file_priv->master->lock);
dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
drm_idlelock_release(&file_priv->master->lock);
}
if (drm_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(&dev->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked) {
dev->driver->reclaim_buffers(dev, file_priv);
}
}
/**
* Release file.
*
* \param inode device inode
* \param file_priv DRM file private.
* \return zero on success or a negative number on failure.
*
* If the hardware lock is held then free it, and take it again for the kernel
* context since it's necessary to reclaim buffers. Unlink the file private
* data from its list and free it. Decreases the open count and if it reaches
* zero calls drm_lastclose().
*/
int drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
int retcode = 0;
lock_kernel();
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
/* ========================================================
* Begin inline drm_release
*/
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->device),
dev->open_count);
/* if the master has gone away we can't do anything with the lock */
if (file_priv->minor->master)
drm_master_release(dev, filp);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
......@@ -428,12 +475,24 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex);
if (file_priv->remove_auth_on_close == 1) {
struct drm_file *temp;
list_for_each_entry(temp, &dev->filelist, lhead)
if (file_priv->is_master) {
struct drm_file *temp;
list_for_each_entry(temp, &dev->filelist, lhead) {
if ((temp->master == file_priv->master) &&
(temp != file_priv))
temp->authenticated = 0;
}
if (file_priv->minor->master == file_priv->master) {
/* drop the reference held by the minor */
drm_master_put(&file_priv->minor->master);
}
}
/* drop the reference held by the file priv */
drm_master_put(&file_priv->master);
file_priv->is_master = 0;
list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex);
......@@ -448,9 +507,9 @@ int drm_release(struct inode *inode, struct file *filp)
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count), dev->blocked);
if (atomic_read(&dev->ioctl_count)) {
DRM_ERROR("Device busy: %d\n",
atomic_read(&dev->ioctl_count));
spin_unlock(&dev->count_lock);
unlock_kernel();
return -EBUSY;
......
......@@ -64,6 +64,13 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
/*
* We make up offsets for buffer objects so we can recognize them at
* mmap time.
*/
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
/**
* Initialize the GEM device fields
*/
......@@ -71,6 +78,8 @@
int
drm_gem_init(struct drm_device *dev)
{
struct drm_gem_mm *mm;
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
atomic_set(&dev->object_count, 0);
......@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev)
atomic_set(&dev->pin_memory, 0);
atomic_set(&dev->gtt_count, 0);
atomic_set(&dev->gtt_memory, 0);
mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
if (!mm) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
}
dev->mm_private = mm;
if (drm_ht_create(&mm->offset_hash, 19)) {
drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
return -ENOMEM;
}
if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
drm_ht_remove(&mm->offset_hash);
return -ENOMEM;
}
return 0;
}
void
drm_gem_destroy(struct drm_device *dev)
{
struct drm_gem_mm *mm = dev->mm_private;
drm_mm_takedown(&mm->offset_manager);
drm_ht_remove(&mm->offset_hash);
drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
dev->mm_private = NULL;
}
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
......@@ -419,3 +460,73 @@ drm_gem_object_handle_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
/**
* drm_gem_mmap - memory map routine for GEM objects
* @filp: DRM file pointer
* @vma: VMA for the area to be mapped
*
* If a driver supports GEM object mapping, mmap calls on the DRM file
* descriptor will end up here.
*
* If we find the object based on the offset passed in (vma->vm_pgoff will
* contain the fake offset we created when the GTT map ioctl was called on
* the object), we set up the driver fault handler so that any accesses
* to the object can be trapped, to perform migration, GTT binding, surface
* register allocation, or performance monitoring.
*/
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map *map = NULL;
struct drm_gem_object *obj;
struct drm_hash_item *hash;
unsigned long prot;
int ret = 0;
mutex_lock(&dev->struct_mutex);
if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
mutex_unlock(&dev->struct_mutex);
return drm_mmap(filp, vma);
}
map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
if (!map ||
((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
ret = -EPERM;
goto out_unlock;
}
/* Check for valid size. */
if (map->size < vma->vm_end - vma->vm_start) {
ret = -EINVAL;
goto out_unlock;
}
obj = map->handle;
if (!obj->dev->driver->gem_vm_ops) {
ret = -EINVAL;
goto out_unlock;
}
vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = map->handle;
/* FIXME: use pgprot_writecombine when available */
prot = pgprot_val(vma->vm_page_prot);
#ifdef CONFIG_X86
prot |= _PAGE_CACHE_WC;
#endif
vma->vm_page_prot = __pgprot(prot);
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
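To make the fake-offset scheme above concrete: userspace first asks the driver for an object's mmap offset (for i915 this is the GEM mmap-GTT ioctl), then mmap()s the DRM fd at that offset, which routes the mapping through drm_gem_mmap() and its offset_hash lookup. A minimal userspace sketch follows, assuming libdrm-style headers and i915's DRM_IOCTL_I915_GEM_MMAP_GTT; the names and include path are illustrative, not part of this merge.

/* Sketch only: map a GEM object through the fake offset handed out by
 * the driver.  Error handling is abbreviated. */
#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>	/* assumed UAPI header location */

static void *map_gem_object(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* The driver fills arg.offset with the fake offset it registered
	 * in the GEM offset hash (see DRM_FILE_PAGE_OFFSET_START above). */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) != 0)
		return MAP_FAILED;

	/* mmap() on the DRM fd ends up in drm_gem_mmap(), which resolves
	 * vm_pgoff back to the object and installs the driver's gem_vm_ops. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}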
......@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
}
return 0;
}
EXPORT_SYMBOL(drm_ht_insert_item);
/*
* Just insert an item and return any "bits" bit key that hasn't been
......@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
ht->fill--;
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
......
......@@ -296,3 +296,4 @@ void drm_mm_takedown(struct drm_mm * mm)
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
}
EXPORT_SYMBOL(drm_mm_takedown);
......@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
case _DRM_GEM:
DRM_ERROR("tried to rmmap GEM object\n");
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
......@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
static void drm_vm_open_locked(struct vm_area_struct *vma)
void drm_vm_open_locked(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
......@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
* according to the mapping type and remaps the pages. Finally sets the file
* pointer and calls vm_open().
*/
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
......
......@@ -8,7 +8,22 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_proc.o \
i915_gem_tiling.o
i915_gem_tiling.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
intel_bios.o \
intel_sdvo.o \
intel_modes.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
intel_dvo.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o
i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
......
......@@ -208,6 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
obj_priv->tiling_mode = args->tiling_mode;
obj_priv->stride = args->stride;
mutex_unlock(&dev->struct_mutex);
......
......@@ -46,7 +46,8 @@
static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
struct drm_tex_region *list;
unsigned shift, nr;
unsigned start;
......
......@@ -257,8 +257,8 @@ void opregion_enable_asle(struct drm_device *dev)
static struct intel_opregion *system_opregion;
int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
void *data)
static int intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
{
/* The only video events relevant to opregion are 0x80. These indicate
either a docking event, lid switch or display switch request. In
......
......@@ -96,6 +96,8 @@ static struct drm_driver driver = {
.enable_vblank = radeon_enable_vblank,
.disable_vblank = radeon_disable_vblank,
.dri_library_name = dri_library_name,
.master_create = radeon_master_create,
.master_destroy = radeon_master_destroy,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,
.irq_uninstall = radeon_driver_irq_uninstall,
......
unifdef-y += drm.h drm_sarea.h
unifdef-y += drm.h drm_sarea.h drm_mode.h
unifdef-y += i810_drm.h
unifdef-y += i830_drm.h
unifdef-y += i915_drm.h
......