Commit 40c7f211 authored by Linus Torvalds

Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (177 commits)
  drm/radeon: fixup refcounts in radeon dumb create ioctl.
  drm: radeon: *_cs_packet_parse_vline() cleanup
  radeon: merge list_del()/list_add_tail() to list_move_tail()
  drm: Retry i2c transfer of EDID block after failure
  drm/radeon/kms: fix typo in atom overscan setup
  drm: Hold the mode mutex whilst probing for sysfs status
  drm/nouveau: fix __nouveau_fence_wait performance
  drm/nv40: attempt to reserve just enough vram for all 32 channels
  drm/nv50: check for vm traps on every gr irq
  drm/nv50: decode vm faults some more
  drm/nouveau: add nouveau_enum_find() util function
  drm/nouveau: properly handle pushbuffer check failures
  drm/nvc0: remove vm hack forcing large/small pages to not share a PDE
  drm/i915: disable opregion lid detection for now.
  drm/i915: Only wait on a pending flip if we intend to write to the buffer
  drm/i915/dp: Sanity check eDP existence
  drm: add cap bit to denote if dumb ioctl is available or not.
  drm/core: add ioctl to query device/driver capabilities
  drm/radeon/kms: allow max clock of 340 Mhz on hdmi 1.3+
  drm/radeon/kms: add cayman pci ids
  ...
parents b04d0a90 c87a8d8d
......@@ -73,32 +73,17 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_I810
tristate "Intel I810"
# BKL usage in order to avoid AB-BA deadlocks, may become BROKEN_ON_SMP
depends on DRM && AGP && AGP_INTEL && BKL
# !PREEMPT because of missing ioctl locking
depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
for this driver to work.
choice
prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
depends on DRM && AGP && AGP_INTEL
optional
config DRM_I830
tristate "i830 driver"
# BKL usage in order to avoid AB-BA deadlocks, i830 may get removed
depends on BKL
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM or 865G integrated graphics. If M is selected, the
module will be called i830. AGP support is required for this driver
to work. This driver is used by the older X releases X.org 6.7 and
XFree86 4.3. If unsure, build this and i915 as modules and the X server
will load the correct one.
config DRM_I915
tristate "i915 driver"
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
depends on AGP
depends on AGP_INTEL
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
......@@ -115,12 +100,20 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
module will be called i915. AGP support is required for this driver
to work. This driver is used by the Intel driver in X.org 6.8 and
XFree86 4.4 and above. If unsure, build this and i830 as modules and
the X server will load the correct one.
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
If M is selected, the module will be called i915. AGP support
is required for this driver to work. This driver is used by
the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
replaces the older i830 module that supported a subset of the
hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
......@@ -132,8 +125,6 @@ config DRM_I915_KMS
the driver to bind to PCI devices, which precludes loading things
like intelfb.
endchoice
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && PCI
......
......@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o
drm_trace_points.o drm_global.o drm_usb.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
......@@ -29,7 +29,6 @@ obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
......
......@@ -2694,3 +2694,36 @@ void drm_mode_config_reset(struct drm_device *dev)
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_create_dumb *args = data;
if (!dev->driver->dumb_create)
return -ENOSYS;
return dev->driver->dumb_create(file_priv, dev, args);
}
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_map_dumb *args = data;
/* call driver ioctl to get mmap offset */
if (!dev->driver->dumb_map_offset)
return -ENOSYS;
return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_destroy_dumb *args = data;
if (!dev->driver->dumb_destroy)
return -ENOSYS;
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
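Editor's sketch (not part of the patch): the three handlers above only dispatch to optional driver hooks, so userspace drives them through the new DRM_IOCTL_MODE_*_DUMB ioctls. A minimal, hedged example of that flow follows; the /dev/dri/card0 path and the 1024x768x32 geometry are illustrative assumptions and error handling is kept to the bare minimum.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>	/* also pulls in drm_mode.h for the dumb-buffer structs */

int main(void)
{
	struct drm_mode_create_dumb creq;
	struct drm_mode_map_dumb mreq;
	struct drm_mode_destroy_dumb dreq;
	void *map;
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative node */

	if (fd < 0)
		return 1;

	memset(&creq, 0, sizeof(creq));
	creq.width = 1024;
	creq.height = 768;
	creq.bpp = 32;
	/* fails with -ENOSYS when the driver has no dumb_create hook */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
		goto out;

	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = creq.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
		goto destroy;

	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, mreq.offset);
	if (map != MAP_FAILED) {
		memset(map, 0, creq.size);	/* clear the new buffer */
		munmap(map, creq.size);
	}

destroy:
	memset(&dreq, 0, sizeof(dreq));
	dreq.handle = creq.handle;
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);
out:
	close(fd);
	return 0;
}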
......@@ -67,6 +67,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
......@@ -150,7 +151,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
......@@ -234,49 +238,6 @@ int drm_lastclose(struct drm_device * dev)
return 0;
}
/**
* Module initialization. Called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported).
*
* \return zero on success or a negative number on failure.
*
* Initializes an array of drm_device structures, and attempts to
* initialize all available devices, using consecutive minors, registering the
* stubs and initializing the device.
*
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int drm_init(struct drm_driver *driver)
{
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
return drm_platform_init(driver);
else
return drm_pci_init(driver);
}
EXPORT_SYMBOL(drm_init);
void drm_exit(struct drm_driver *driver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (driver->driver_features & DRIVER_MODESET) {
pci_unregister_driver(&driver->pci_driver);
} else {
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_put_dev(dev);
}
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_exit);
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
......
......@@ -230,24 +230,32 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
int ret, retries = 5;
if (i2c_transfer(adapter, msgs, 2) == 2)
return 0;
/* The core i2c driver will automatically retry the transfer if the
* adapter reports EAGAIN. However, we find that bit-banging transfers
* are susceptible to errors under a heavily loaded machine and
* generate spurious NAKs and timeouts. Retrying the transfer
* of the individual block a few times seems to overcome this.
*/
do {
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
ret = i2c_transfer(adapter, msgs, 2);
} while (ret != 2 && --retries);
return -1;
return ret == 2 ? 0 : -1;
}
static u8 *
......@@ -449,12 +457,11 @@ static void edid_fixup_preferred(struct drm_connector *connector,
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
struct drm_display_mode *mode = NULL;
int i;
struct drm_display_mode *ptr, *mode;
mode = NULL;
for (i = 0; i < drm_num_dmt_modes; i++) {
ptr = &drm_dmt_modes[i];
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
......@@ -885,7 +892,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
}
static bool
mode_is_rb(struct drm_display_mode *mode)
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
......@@ -894,7 +901,8 @@ mode_is_rb(struct drm_display_mode *mode)
}
static bool
mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
mode_in_hsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
int hsync, hmin, hmax;
......@@ -910,7 +918,8 @@ mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
}
static bool
mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
mode_in_vsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
int vsync, vmin, vmax;
......@@ -941,7 +950,7 @@ range_pixel_clock(struct edid *edid, u8 *t)
}
static bool
mode_in_range(struct drm_display_mode *mode, struct edid *edid,
mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
struct detailed_timing *timing)
{
u32 max_clock;
......@@ -1472,7 +1481,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
struct drm_display_mode *mode, *ptr;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
......@@ -1482,7 +1491,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
vdisplay = 0;
for (i = 0; i < count; i++) {
ptr = &drm_dmt_modes[i];
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check
......
......@@ -32,7 +32,7 @@
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the mode with Reduced blank feature is deleted.
*/
static struct drm_display_mode drm_dmt_modes[] = {
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
......@@ -266,7 +266,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
static struct drm_display_mode edid_est_modes[] = {
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
......
......@@ -627,6 +627,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
if (info->var.transp.length > 0) {
u32 mask = (1 << info->var.transp.length) - 1;
mask <<= info->var.transp.offset;
value |= mask;
}
palette[regno] = value;
return 0;
}
......
......@@ -101,7 +101,7 @@ drm_gem_init(struct drm_device *dev)
dev->mm_private = mm;
if (drm_ht_create(&mm->offset_hash, 19)) {
if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
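Editor's note for context: assuming 8-byte hlist_head buckets (64-bit) and 4 KiB pages, the old order of 19 reserved 2^19 * 8 = 4 MiB of bucket heads per device, while order 12 needs only 2^12 * 8 = 32 KiB; since 32 KiB still exceeds a single page, the reworked drm_ht_create() shown further down allocates this table with vzalloc() rather than kcalloc().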
......@@ -181,7 +181,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
static int
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
......@@ -214,6 +214,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* Create a handle for this object. This adds a handle reference
......
......@@ -39,27 +39,18 @@
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int i;
unsigned int size = 1 << order;
ht->size = 1 << order;
ht->order = order;
ht->fill = 0;
ht->table = NULL;
ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
if (!ht->use_vmalloc) {
ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
}
if (!ht->table) {
ht->use_vmalloc = 1;
ht->table = vmalloc(ht->size*sizeof(*ht->table));
}
if (size <= PAGE_SIZE / sizeof(*ht->table))
ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
else
ht->table = vzalloc(size*sizeof(*ht->table));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
for (i=0; i< ht->size; ++i) {
INIT_HLIST_HEAD(&ht->table[i]);
}
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
......@@ -180,7 +171,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init(list);
ht->fill--;
return 0;
}
return -EINVAL;
......@@ -189,7 +179,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
......@@ -197,10 +186,10 @@ EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
if (ht->use_vmalloc)
vfree(ht->table);
else
if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
kfree(ht->table);
else
vfree(ht->table);
ht->table = NULL;
}
}
......
......@@ -47,30 +47,19 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
const char *bus_name;
if (!master)
return 0;
if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
if (master->unique) {
seq_printf(m, "%s %s %s\n",
dev->driver->platform_device->name,
dev_name(dev->dev), master->unique);
} else {
seq_printf(m, "%s\n",
dev->driver->platform_device->name);
}
bus_name = dev->driver->bus->get_name(dev);
if (master->unique) {
seq_printf(m, "%s %s %s\n",
bus_name,
dev_name(dev->dev), master->unique);
} else {
if (master->unique) {
seq_printf(m, "%s %s %s\n",
dev->driver->pci_driver.name,
dev_name(dev->dev), master->unique);
} else {
seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
dev_name(dev->dev));
}
seq_printf(m, "%s %s\n",
bus_name, dev_name(dev->dev));
}
return 0;
}
......
......@@ -96,7 +96,7 @@ int drm_setunique(struct drm_device *dev, void *data,
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
int domain, bus, slot, func, ret;
int ret;
if (master->unique_len || master->unique)
return -EBUSY;
......@@ -104,50 +104,12 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
}
if (copy_from_user(master->unique, u->unique, master->unique_len)) {
ret = -EFAULT;
goto err;
}
master->unique[master->unique_len] = '\0';
dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
master->unique);
if (!dev->driver->bus->set_unique)
return -EINVAL;
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
ret = -EINVAL;
ret = dev->driver->bus->set_unique(dev, master, u);
if (ret)
goto err;
}
domain = bus >> 8;
bus &= 0xff;
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
(func != PCI_FUNC(dev->pdev->devfn))) {
ret = -EINVAL;
goto err;
}
return 0;
......@@ -159,74 +121,15 @@ int drm_setunique(struct drm_device *dev, void *data,
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
int len, ret;
int ret;
if (master->unique != NULL)
drm_unset_busid(dev, master);
if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
master->unique_len = 10 + strlen(dev->platformdev->name);
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"platform:%s", dev->platformdev->name);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
ret = -EINVAL;
goto err;
}
dev->devname =
kmalloc(strlen(dev->platformdev->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->platformdev->name,
master->unique);
} else {
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
dev->devname =
kmalloc(strlen(dev->driver->pci_driver.name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
master->unique);
}
ret = dev->driver->bus->set_busid(dev, master);
if (ret)
goto err;
return 0;
err:
drm_unset_busid(dev, master);
return ret;
......@@ -364,6 +267,25 @@ int drm_getstats(struct drm_device *dev, void *data,
return 0;
}
/**
* Get device/driver capabilities
*/
int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_get_cap *req = data;
req->value = 0;
switch (req->capability) {
case DRM_CAP_DUMB_BUFFER:
if (dev->driver->dumb_create)
req->value = 1;
break;
default:
return -EINVAL;
}
return 0;
}
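Editor's sketch (not part of the patch): userspace can use the new GET_CAP ioctl to probe for dumb-buffer support before relying on it. The helper below is an illustrative assumption built on the drm_get_cap layout and DRM_CAP_DUMB_BUFFER flag added by this series; the device path argument is a placeholder.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

/* Returns 1 when the driver behind @path implements dumb buffers. */
static int supports_dumb(const char *path)
{
	struct drm_get_cap cap;
	int fd = open(path, O_RDWR);
	int ret = 0;

	if (fd < 0)
		return 0;
	memset(&cap, 0, sizeof(cap));
	cap.capability = DRM_CAP_DUMB_BUFFER;
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
		ret = cap.value != 0;	/* set when dev->driver->dumb_create exists */
	close(fd);
	return ret;
}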
/**
* Setversion ioctl.
*
......
......@@ -74,23 +74,13 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
if (!dev->driver->bus->irq_by_busid)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
return dev->driver->bus->irq_by_busid(dev, p);
}
/*
......
......@@ -593,7 +593,7 @@ EXPORT_SYMBOL(drm_mode_height);
*
* Return @modes's hsync rate in kHz, rounded to the nearest int.
*/
int drm_mode_hsync(struct drm_display_mode *mode)
int drm_mode_hsync(const struct drm_display_mode *mode)
{
unsigned int calc_val;
......@@ -627,7 +627,7 @@ EXPORT_SYMBOL(drm_mode_hsync);
* If it is 70.288, it will return 70Hz.
* If it is 59.6, it will return 60Hz.
*/
int drm_mode_vrefresh(struct drm_display_mode *mode)
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
......@@ -725,7 +725,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
......
......@@ -125,6 +125,176 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
#ifdef CONFIG_PCI
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
/* For historical reasons, drm_get_pci_domain() is busticated
* on most archs and has to remain so for userspace interface
* < 1.4, except on alpha which was right from the beginning
*/
if (dev->if_version < 0x10004)
return 0;
#endif /* __alpha__ */
return pci_domain_nr(dev->pdev->bus);
}
static int drm_pci_get_irq(struct drm_device *dev)
{
return dev->pdev->irq;
}
static const char *drm_pci_get_name(struct drm_device *dev)
{
struct pci_driver *pdriver = dev->driver->kdriver.pci;
return pdriver->name;
}
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
struct pci_driver *pdriver = dev->driver->kdriver.pci;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
dev->devname =
kmalloc(strlen(pdriver->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", pdriver->name,
master->unique);
return 0;
err:
return ret;
}
int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
{
int domain, bus, slot, func, ret;
const char *bus_name;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
}
if (copy_from_user(master->unique, u->unique, master->unique_len)) {
ret = -EFAULT;
goto err;
}
master->unique[master->unique_len] = '\0';
bus_name = dev->driver->bus->get_name(dev);
dev->devname = kmalloc(strlen(bus_name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", bus_name,
master->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
ret = -EINVAL;
goto err;
}
domain = bus >> 8;
bus &= 0xff;
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
(func != PCI_FUNC(dev->pdev->devfn))) {
ret = -EINVAL;
goto err;
}
return 0;
err:
return ret;
}
int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
}
int drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_has_AGP(dev)) {
if (drm_pci_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
return -EINVAL;
}
if (drm_core_has_MTRR(dev)) {
if (dev->agp)
dev->agp->agp_mtrr =
mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024, MTRR_TYPE_WRCOMB, 1);
}
}
return 0;
}
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.agp_init = drm_pci_agp_init,
};
/**
* Register.
*
......@@ -219,7 +389,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
EXPORT_SYMBOL(drm_get_pci_dev);
/**
* PCI device initialization. Called via drm_init at module load time,
* PCI device initialization. Called direct from modules at load time.
*
* \return zero on success or a negative number on failure.
*
......@@ -229,18 +399,24 @@ EXPORT_SYMBOL(drm_get_pci_dev);
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int drm_pci_init(struct drm_driver *driver)
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.pci = pdriver;
driver->bus = &drm_pci_bus;
if (driver->driver_features & DRIVER_MODESET)
return pci_register_driver(&driver->pci_driver);
return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = &driver->pci_driver.id_table[i];
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
......@@ -265,10 +441,27 @@ int drm_pci_init(struct drm_driver *driver)
#else
int drm_pci_init(struct drm_driver *driver)
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
return -1;
}
#endif
EXPORT_SYMBOL(drm_pci_init);
/*@}*/
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (driver->driver_features & DRIVER_MODESET) {
pci_unregister_driver(pdriver);
} else {
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_put_dev(dev);
}
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);
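Editor's sketch: with drm_init()/drm_exit() gone, a PCI DRM driver now owns its struct pci_driver and hands it to drm_pci_init()/drm_pci_exit() explicitly, exactly as the i810 and i915 conversions later in this merge do. All "foo" identifiers below are placeholders for the driver's existing definitions.

/* foo_drm_driver, DRIVER_NAME and foo_pciidlist stand in for the driver's
 * own struct drm_driver, name string and PCI ID table. */
static struct pci_driver foo_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= foo_pciidlist,
};

static int __init foo_init(void)
{
	return drm_pci_init(&foo_drm_driver, &foo_pci_driver);
}

static void __exit foo_exit(void)
{
	drm_pci_exit(&foo_drm_driver, &foo_pci_driver);
}

module_init(foo_init);
module_exit(foo_exit);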
......@@ -109,8 +109,60 @@ int drm_get_platform_dev(struct platform_device *platdev,
}
EXPORT_SYMBOL(drm_get_platform_dev);
static int drm_platform_get_irq(struct drm_device *dev)
{
return platform_get_irq(dev->platformdev, 0);
}
static const char *drm_platform_get_name(struct drm_device *dev)
{
return dev->platformdev->name;
}
static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
master->unique_len = 10 + strlen(dev->platformdev->name);
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"platform:%s", dev->platformdev->name);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
ret = -EINVAL;
goto err;
}
dev->devname =
kmalloc(strlen(dev->platformdev->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->platformdev->name,
master->unique);
return 0;
err:
return ret;
}
static struct drm_bus drm_platform_bus = {
.bus_type = DRIVER_BUS_PLATFORM,
.get_irq = drm_platform_get_irq,
.get_name = drm_platform_get_name,
.set_busid = drm_platform_set_busid,
};
/**
* Platform device initialization. Called via drm_init at module load time,
* Platform device initialization. Called direct from modules.
*
* \return zero on success or a negative number on failure.
*
......@@ -121,7 +173,24 @@ EXPORT_SYMBOL(drm_get_platform_dev);
* after the initialization for driver customization.
*/
int drm_platform_init(struct drm_driver *driver)
int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
{
return drm_get_platform_dev(driver->platform_device, driver);
DRM_DEBUG("\n");
driver->kdriver.platform_device = platform_device;
driver->bus = &drm_platform_bus;
INIT_LIST_HEAD(&driver->device_list);
return drm_get_platform_dev(platform_device, driver);
}
EXPORT_SYMBOL(drm_platform_init);
void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_put_dev(dev);
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_platform_exit);
......@@ -269,25 +269,14 @@ int drm_fill_in_dev(struct drm_device *dev,
dev->driver = driver;
if (drm_core_has_AGP(dev)) {
if (drm_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
retcode = -EINVAL;
if (dev->driver->bus->agp_init) {
retcode = dev->driver->bus->agp_init(dev);
if (retcode)
goto error_out_unreg;
}
if (drm_core_has_MTRR(dev)) {
if (dev->agp)
dev->agp->agp_mtrr =
mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024, MTRR_TYPE_WRCOMB, 1);
}
}
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
......@@ -425,7 +414,6 @@ int drm_put_minor(struct drm_minor **minor_p)
*
* Cleans up all DRM device, calling drm_lastclose().
*
* \sa drm_init
*/
void drm_put_dev(struct drm_device *dev)
{
......@@ -475,6 +463,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
if (dev->devname) {
kfree(dev->devname);
dev->devname = NULL;
......
......@@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device,
{
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
int ret;
ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
if (ret)
return ret;
status = connector->funcs->detect(connector, true);
mutex_unlock(&connector->dev->mode_config.mutex);
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
......
#include "drmP.h"
#include <linux/usb.h>
#ifdef CONFIG_USB
int drm_get_usb_dev(struct usb_interface *interface,
const struct usb_device_id *id,
struct drm_driver *driver)
{
struct drm_device *dev;
struct usb_device *usbdev;
int ret;
DRM_DEBUG("\n");
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
usbdev = interface_to_usbdev(interface);
dev->usbdev = usbdev;
dev->dev = &usbdev->dev;
mutex_lock(&drm_global_mutex);
ret = drm_fill_in_dev(dev, NULL, driver);
if (ret) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g1;
}
usb_set_intfdata(interface, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g1;
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
if (ret)
goto err_g2;
if (dev->driver->load) {
ret = dev->driver->load(dev, 0);
if (ret)
goto err_g3;
}
/* setup the grouping for the legacy output */
ret = drm_mode_group_init_legacy_group(dev,
&dev->primary->mode_group);
if (ret)
goto err_g3;
list_add_tail(&dev->driver_item, &driver->device_list);
mutex_unlock(&drm_global_mutex);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary->index);
return 0;
err_g3:
drm_put_minor(&dev->primary);
err_g2:
drm_put_minor(&dev->control);
err_g1:
kfree(dev);
mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_usb_dev);
static int drm_usb_get_irq(struct drm_device *dev)
{
return 0;
}
static const char *drm_usb_get_name(struct drm_device *dev)
{
return "USB";
}
static int drm_usb_set_busid(struct drm_device *dev,
struct drm_master *master)
{
return 0;
}
static struct drm_bus drm_usb_bus = {
.bus_type = DRIVER_BUS_USB,
.get_irq = drm_usb_get_irq,
.get_name = drm_usb_get_name,
.set_busid = drm_usb_set_busid,
};
int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
{
int res;
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.usb = udriver;
driver->bus = &drm_usb_bus;
res = usb_register(udriver);
return res;
}
EXPORT_SYMBOL(drm_usb_init);
void drm_usb_exit(struct drm_driver *driver,
struct usb_driver *udriver)
{
usb_deregister(udriver);
}
EXPORT_SYMBOL(drm_usb_exit);
#endif
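Editor's sketch of how a USB display driver might sit on top of these new helpers; every "foo" identifier and the VID/PID pair are placeholders, and foo_drm_driver is assumed to be the driver's struct drm_driver defined elsewhere.

static const struct usb_device_id foo_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder VID/PID */
	{ }
};

static int foo_usb_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	/* creates the drm_device and stashes it via usb_set_intfdata() */
	return drm_get_usb_dev(interface, id, &foo_drm_driver);
}

static void foo_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_put_dev(dev);
}

static struct usb_driver foo_usb_driver = {
	.name		= "foo",
	.probe		= foo_usb_probe,
	.disconnect	= foo_usb_disconnect,
	.id_table	= foo_id_table,
};

static int __init foo_init(void)
{
	return drm_usb_init(&foo_drm_driver, &foo_usb_driver);
}

static void __exit foo_exit(void)
{
	drm_usb_exit(&foo_drm_driver, &foo_usb_driver);
}

module_init(foo_init);
module_exit(foo_exit);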
......@@ -37,7 +37,6 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
......@@ -94,7 +93,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
lock_kernel();
dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
......@@ -104,7 +102,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
vma->vm_file = filp;
buf_priv->currently_mapped = I810_BUF_MAPPED;
unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
......@@ -116,7 +113,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = i810_ioctl,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
.llseek = noop_llseek,
......@@ -1242,19 +1239,6 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
/*
* call the drm_ioctl under the big kernel lock because
* to lock against the i810_mmap_buffers function.
*/
long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
lock_kernel();
ret = drm_ioctl(file, cmd, arg);
unlock_kernel();
return ret;
}
struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
......
......@@ -57,18 +57,13 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = i810_ioctl,
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.llseek = noop_llseek,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
......@@ -77,15 +72,24 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver i810_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
static int __init i810_init(void)
{
if (num_possible_cpus() > 1) {
pr_err("drm/i810 does not support SMP\n");
return -EINVAL;
}
driver.num_ioctls = i810_max_ioctl;
return drm_init(&driver);
return drm_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
drm_exit(&driver);
drm_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);
......
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
i830-y := i830_drv.o i830_dma.o i830_irq.o
obj-$(CONFIG_DRM_I830) += i830.o
/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
#ifndef _I830_DRV_H_
#define _I830_DRV_H_
/* General customization:
*/
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "i830"
#define DRIVER_DESC "Intel 830M"
#define DRIVER_DATE "20021108"
/* Interface history:
*
* 1.1: Original.
* 1.2: ?
* 1.3: New irq emit/wait ioctls.
* New pageflip ioctl.
* New getparam ioctl.
* State for texunits 3&4 in sarea.
* New (alternative) layout for texture state.
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 2
/* Driver will work either way: IRQ's save cpu time when waiting for
* the card, but are subject to subtle interactions between bios,
* hardware and the driver.
*/
/* XXX: Add vblank support? */
#define USE_IRQS 0
typedef struct drm_i830_buf_priv {
u32 *in_use;
int my_use_idx;
int currently_mapped;
void __user *virtual;
void *kernel_virtual;
drm_local_map_t map;
} drm_i830_buf_priv_t;
typedef struct _drm_i830_ring_buffer {
int tail_mask;
unsigned long Start;
unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
drm_local_map_t map;
} drm_i830_ring_buffer_t;
typedef struct drm_i830_private {
struct drm_local_map *sarea_map;
struct drm_local_map *mmio_map;
drm_i830_sarea_t *sarea_priv;
drm_i830_ring_buffer_t ring;
void *hw_status_page;
unsigned long counter;
dma_addr_t dma_status_page;
struct drm_buf *mmap_buffer;
u32 front_di1, back_di1, zi1;
int back_offset;
int depth_offset;
int front_offset;
int w, h;
int pitch;
int back_pitch;
int depth_pitch;
unsigned int cpp;
int do_boxes;
int dma_used;
int current_page;
int page_flipping;
wait_queue_head_t irq_queue;
atomic_t irq_received;
atomic_t irq_emitted;
int use_mi_batchbuffer_start;
} drm_i830_private_t;
long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern struct drm_ioctl_desc i830_ioctls[];
extern int i830_max_ioctl;
/* i830_irq.c */
extern int i830_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
extern void i830_driver_irq_preinstall(struct drm_device *dev);
extern void i830_driver_irq_postinstall(struct drm_device *dev);
extern void i830_driver_irq_uninstall(struct drm_device *dev);
extern int i830_driver_load(struct drm_device *, unsigned long flags);
extern void i830_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
extern void i830_driver_lastclose(struct drm_device *dev);
extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern int i830_driver_dma_quiescent(struct drm_device *dev);
extern int i830_driver_device_is_agp(struct drm_device *dev);
#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
#define I830_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
printk("BEGIN_LP_RING(%d)\n", (n)); \
if (dev_priv->ring.space < n*4) \
i830_wait_ring(dev, n*4, __func__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
ringmask = dev_priv->ring.tail_mask; \
virt = dev_priv->ring.virtual_start; \
} while (0)
#define OUT_RING(n) do { \
if (I830_VERBOSE) \
printk(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outcount++; \
outring += 4; \
outring &= ringmask; \
} while (0)
#define ADVANCE_LP_RING() do { \
if (I830_VERBOSE) \
printk("ADVANCE_LP_RING %x\n", outring); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I830_WRITE(LP_RING + RING_TAIL, outring); \
} while (0)
extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
#define LOAD_TEXTURE_MAP0 (1<<11)
#define INST_PARSER_CLIENT 0x00000000
#define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)
#define I830REG_HWSTAM 0x02098
#define I830REG_INT_IDENTITY_R 0x020a4
#define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_ENABLE_R 0x020a0
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0x0xFFFFF000
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x001FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
#define SCI_YMAX_MASK (0xffff<<16)
#define SCI_XMAX_MASK (0xffff<<0)
#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22)
#define CMD_3D (0x3<<29)
#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
#define BR00_BITBLT_CLIENT 0x40000000
#define BR00_OP_COLOR_BLT 0x10000000
#define BR00_OP_SRC_COPY_BLT 0x10C00000
#define BR13_SOLID_PATTERN 0x80000000
#define BUF_3D_ID_COLOR_BACK (0x3<<24)
#define BUF_3D_ID_DEPTH (0x7<<24)
#define BUF_3D_USE_FENCE (1<<23)
#define BUF_3D_PITCH(x) (((x)/4)<<2)
#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
#define MAP_PALETTE_BOTH (1<<11)
#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
#define XY_COLOR_BLT_WRITE_RGB (1<<20)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
#endif
/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
*
* Copyright 2002 Tungsten Graphics, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Keith Whitwell <keith@tungstengraphics.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
u16 temp;
temp = I830_READ16(I830REG_INT_IDENTITY_R);
DRM_DEBUG("%x\n", temp);
if (!(temp & 2))
return IRQ_NONE;
I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
atomic_inc(&dev_priv->irq_received);
wake_up_interruptible(&dev_priv->irq_queue);
return IRQ_HANDLED;
}
static int i830_emit_irq(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("%s\n", __func__);
atomic_inc(&dev_priv->irq_emitted);
BEGIN_LP_RING(2);
OUT_RING(0);
OUT_RING(GFX_OP_USER_INTERRUPT);
ADVANCE_LP_RING();
return atomic_read(&dev_priv->irq_emitted);
}
static int i830_wait_irq(struct drm_device *dev, int irq_nr)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
DECLARE_WAITQUEUE(entry, current);
unsigned long end = jiffies + HZ * 3;
int ret = 0;
DRM_DEBUG("%s\n", __func__);
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
return 0;
dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
add_wait_queue(&dev_priv->irq_queue, &entry);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
break;
if ((signed)(end - jiffies) <= 0) {
DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
I830_READ16(I830REG_INT_IDENTITY_R),
I830_READ16(I830REG_INT_MASK_R),
I830_READ16(I830REG_INT_ENABLE_R),
I830_READ16(I830REG_HWSTAM));
ret = -EBUSY; /* Lockup? Missed irq? */
break;
}
schedule_timeout(HZ * 3);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev_priv->irq_queue, &entry);
return ret;
}
/* Needs the lock as it touches the ring.
*/
int i830_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_emit_t *emit = data;
int result;
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
return -EINVAL;
}
result = i830_emit_irq(dev);
if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
return 0;
}
/* Doesn't need the hardware lock.
*/
int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
return -EINVAL;
}
return i830_wait_irq(dev, irqwait->irq_seq);
}
/* drm_dma.h hooks
*/
void i830_driver_irq_preinstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_HWSTAM, 0xffff);
I830_WRITE16(I830REG_INT_MASK_R, 0x0);
I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
atomic_set(&dev_priv->irq_received, 0);
atomic_set(&dev_priv->irq_emitted, 0);
init_waitqueue_head(&dev_priv->irq_queue);
}
void i830_driver_irq_postinstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
}
void i830_driver_irq_uninstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
if (!dev_priv)
return;
I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
}
......@@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct intel_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
const char *pipe = crtc->pipe ? "B" : "A";
const char *plane = crtc->plane ? "B" : "A";
const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
spin_lock_irqsave(&dev->event_lock, flags);
work = crtc->unpin_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %s (plane %s)\n",
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
if (!work->pending) {
seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
if (work->enable_stall_check)
......@@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
......@@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
seq_printf(m, "Pipe A stat: %08x\n",
I915_READ(PIPEASTAT));
seq_printf(m, "Pipe B stat: %08x\n",
I915_READ(PIPEBSTAT));
for_each_pipe(pipe)
seq_printf(m, "Pipe %c stat: %08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
} else {
seq_printf(m, "North Display Interrupt enable: %08x\n",
I915_READ(DEIER));
......@@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
volatile u32 *hws;
const volatile u32 __iomem *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
hws = (volatile u32 *)ring->status_page.page_addr;
hws = (volatile u32 __iomem *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
......@@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
u8 *virt = ring->virtual_start;
const u8 __iomem *virt = ring->virtual_start;
uint32_t off;
for (off = 0; off < ring->size; off += 4) {
......@@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
if (error->ringbuffer) {
struct drm_i915_error_object *obj = error->ringbuffer;
seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
offset += 4;
for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
if (error->ringbuffer[i]) {
struct drm_i915_error_object *obj = error->ringbuffer[i];
seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
seq_printf(m, "%08x : %08x\n",
offset,
obj->pages[page][elt]);
offset += 4;
}
}
}
}
......@@ -862,19 +867,44 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 rpstat;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
/* RPSTAT1 is in the GT power well */
__gen6_gt_force_wake_get(dev_priv);
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
rpprevup = I915_READ(GEN6_RP_PREV_UP);
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
GEN6_CAGF_SHIFT) * 100);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK);
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
......@@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0, 0},
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
......
......@@ -43,6 +43,17 @@
#include <linux/slab.h>
#include <acpi/video.h>
static void i915_write_hws_pga(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_INFO(dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
......@@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
ring->status_page.page_addr =
(void __force __iomem *)dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
0xf0;
i915_write_hws_pga(dev);
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
......@@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
if (ring->status_page.gfx_addr != 0)
intel_ring_setup_status_page(ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
......@@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_INFO(dev)->gen >= 4;
break;
case I915_PARAM_HAS_RELAXED_DELTA:
value = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
......@@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
" G33 hw status page\n");
return -ENOMEM;
}
ring->status_page.page_addr = dev_priv->hws_map.handle;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
ring->status_page.page_addr =
(void __force __iomem *)dev_priv->hws_map.handle;
memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
......@@ -2013,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
goto out_gem_unload;
......
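To restate the vblank-count change in the i915_driver_load() hunk above as a quick truth table (the example devices named in the comment are assumptions about which gen2 parts are single-pipe):
/*
 * IS_GEN2(dev)  IS_MOBILE(dev)  num_pipe
 *      1              0            1     e.g. 845G/865G desktop parts
 *      1              1            2     e.g. 830M/855GM
 *      0              -            2     gen3 and later
 */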
......@@ -43,10 +43,13 @@ module_param_named(modeset, i915_modeset, int, 0400);
unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
int i915_panel_ignore_lid = 0;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
unsigned int i915_semaphores = 0;
unsigned int i915_semaphores = 1;
module_param_named(semaphores, i915_semaphores, int, 0600);
unsigned int i915_enable_rc6 = 0;
......@@ -58,7 +61,10 @@ module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
unsigned int i915_panel_use_ssc = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
bool i915_try_reset = true;
int i915_vbt_sdvo_panel_type = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
static bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);
static struct drm_driver driver;
......@@ -716,6 +722,9 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
......@@ -732,14 +741,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
.driver.pm = &i915_pm_ops,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
......@@ -748,6 +749,14 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver i915_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
.driver.pm = &i915_pm_ops,
};
static int __init i915_init(void)
{
if (!intel_agp_enabled) {
......@@ -781,12 +790,12 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET))
driver.get_vblank_timestamp = NULL;
return drm_init(&driver);
return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
drm_exit(&driver);
drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);
......
......@@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev)
}
#endif /* WATCH_INACTIVE */
#if WATCH_EXEC | WATCH_PWRITE
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
uint32_t bias, uint32_t mark)
{
uint32_t *mem = kmap_atomic(page, KM_USER0);
int i;
for (i = start; i < end; i += 4)
DRM_INFO("%08x: %08x%s\n",
(int) (bias + i), mem[i / 4],
(bias + i == mark) ? " ********" : "");
kunmap_atomic(mem, KM_USER0);
/* give syslog time to catch up */
msleep(1);
}
void
i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
page_len = len - page * PAGE_SIZE;
if (page_len > PAGE_SIZE)
page_len = PAGE_SIZE;
for (chunk = 0; chunk < page_len; chunk += 128) {
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
}
}
#endif
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
......
......@@ -30,6 +30,7 @@
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
......@@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
return 0;
}
trace_i915_gem_evict(dev, min_size, alignment, mappable);
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
......@@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
if (lists_empty)
return -ENOSPC;
trace_i915_gem_evict_everything(dev, purgeable_only);
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev);
if (ret)
......
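A minimal sketch of the kind of TRACE_EVENT that could back the new trace_i915_gem_evict() call above; the field layout and format string are assumptions, not copied from i915_trace.h:
TRACE_EVENT(i915_gem_evict,
	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
	    TP_ARGS(dev, size, align, mappable),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, size)
			     __field(u32, align)
			     __field(bool, mappable)
			    ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->size = size;
			   __entry->align = align;
			   __entry->mappable = mappable;
			  ),

	    TP_printk("dev=%d, size=%d, align=%d%s",
		      __entry->dev, __entry->size, __entry->align,
		      __entry->mappable ? ", mappable" : "")
);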
......@@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;
int ret = 0;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
if (&obj->base == NULL)
return -ENOENT;
if (!i915_tiling_ok(dev,
......@@ -384,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
if (&obj->base == NULL)
return -ENOENT;
mutex_lock(&dev->struct_mutex);
......
......@@ -226,29 +226,49 @@ static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
int index;
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = i915_vbt_sdvo_panel_type;
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
if (!dvo_timing)
return;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
return;
fill_detail_timing_data(panel_fixed_mode,
dvo_timing + sdvo_lvds_options->panel_type);
fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
return;
DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
}
static int intel_bios_ssc_frequency(struct drm_device *dev,
bool alternate)
{
switch (INTEL_INFO(dev)->gen) {
case 2:
return alternate ? 66 : 48;
case 3:
case 4:
return alternate ? 100 : 96;
default:
return alternate ? 100 : 120;
}
}
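As a quick reference, the SSC clock mapping implemented by the new intel_bios_ssc_frequency() helper above (values in MHz, taken straight from the switch):
/*
 *  generation       ssc_freq == 0   ssc_freq != 0 (alternate)
 *  gen2                   48               66
 *  gen3 / gen4            96              100
 *  gen5 and later        120              100
 */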
static void
......@@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
if (IS_I85X(dev))
dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
else if (IS_GEN5(dev) || IS_GEN6(dev))
dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
else
dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
}
}
......@@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
/* LFP panel data */
......@@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* general features */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
dev_priv->lvds_use_ssc = 0;
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
/* eDP data */
dev_priv->edp.bpp = 18;
......
......@@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
u32 adpa, dpll_md;
u32 adpa_reg;
if (intel_crtc->pipe == 0)
dpll_md_reg = DPLL_A_MD;
else
dpll_md_reg = DPLL_B_MD;
dpll_md_reg = DPLL_MD(intel_crtc->pipe);
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
......@@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= PORT_TRANS_A_SEL_CPT;
else
adpa |= ADPA_PIPE_A_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_B_SEL_CPT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
I915_WRITE(adpa_reg, adpa);
}
......@@ -353,21 +349,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
DRM_DEBUG_KMS("starting load-detect on CRT\n");
if (pipe == 0) {
bclrpat_reg = BCLRPAT_A;
vtotal_reg = VTOTAL_A;
vblank_reg = VBLANK_A;
vsync_reg = VSYNC_A;
pipeconf_reg = PIPEACONF;
pipe_dsl_reg = PIPEADSL;
} else {
bclrpat_reg = BCLRPAT_B;
vtotal_reg = VTOTAL_B;
vblank_reg = VBLANK_B;
vsync_reg = VSYNC_B;
pipeconf_reg = PIPEBCONF;
pipe_dsl_reg = PIPEBDSL;
}
bclrpat_reg = BCLRPAT(pipe);
vtotal_reg = VTOTAL(pipe);
vblank_reg = VBLANK(pipe);
vsync_reg = VSYNC(pipe);
pipeconf_reg = PIPECONF(pipe);
pipe_dsl_reg = PIPEDSL(pipe);
save_bclrpat = I915_READ(bclrpat_reg);
save_vtotal = I915_READ(vtotal_reg);
......
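The load-detect hunk above replaces the explicit pipe A/B register pairs with per-pipe macros; a minimal sketch of the usual way such macros are built in i915_reg.h (the _PIPE helper shown here is the conventional pattern and is an assumption, not quoted from this diff):
/* Pick the pipe A or pipe B variant of a register by pipe index. */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define BCLRPAT(pipe)		_PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
#define PIPECONF(pipe)		_PIPE(pipe, PIPEACONF, PIPEBCONF)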
......@@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
return dev_priv->pipe_to_crtc_mapping[pipe];
}
static inline struct drm_crtc *
intel_get_crtc_for_plane(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->plane_to_crtc_mapping[plane];
}
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
......@@ -230,6 +237,8 @@ struct intel_unpin_work {
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
......@@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
......@@ -321,8 +331,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
extern int intel_overlay_switch_off(struct intel_overlay *overlay,
bool interruptible);
extern int intel_overlay_switch_off(struct intel_overlay *overlay);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
......
......@@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:
......
......@@ -41,6 +41,7 @@ struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
int force_audio;
......@@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
......@@ -278,6 +280,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
......@@ -305,6 +308,14 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_hdmi->color_range)
return 0;
intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
goto done;
}
return -EINVAL;
done:
......@@ -363,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
}
intel_attach_broadcast_rgb_property(connector);
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
......
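On the new "Broadcast RGB" handling in the intel_hdmi.c hunks above: the property value simply selects the limited-range bit that intel_hdmi_mode_set() ORs into the SDVOX register. A restating sketch (the helper name is hypothetical):
/* 0 selects full-range RGB output, non-zero selects limited 16-235 range. */
static u32 broadcast_rgb_to_color_range(uint64_t val)
{
	return val ? SDVO_COLOR_RANGE_16_235 : 0;
}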
......@@ -384,7 +384,8 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = i | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
bus->force_bit = intel_gpio_create(dev_priv, i);
if (IS_GEN2(dev))
bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);
......