Commit 8333f607 authored by Alex Deucher

drm/radeon: remove UMS support

It's been deprecated behind a kconfig option for almost
two years and hasn't really been supported for years before
that.  DDX support was dropped more than three years ago.
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 80d69009
@@ -5,12 +5,3 @@ config DRM_RADEON_USERPTR
	help
	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
	  selected to enabled full userptr support.
config DRM_RADEON_UMS
	bool "Enable userspace modesetting on radeon (DEPRECATED)"
	depends on DRM_RADEON
	help
	  Choose this option if you still need userspace modesetting.

	  Userspace modesetting is deprecated for quite some time now, so
	  enable this only if you have ancient versions of the DDX drivers.
@@ -58,10 +58,6 @@ $(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
radeon-y := radeon_drv.o

# add UMS driver
radeon-$(CONFIG_DRM_RADEON_UMS) += radeon_cp.o radeon_state.o radeon_mem.o \
	radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o drm_buffer.o

# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
	radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
/**************************************************************************
*
* Copyright 2010 Pauli Nieminen.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
 * Multipart buffer for copying data which is larger than the page size.
*
* Authors:
* Pauli Nieminen <suokkos-at-gmail-dot-com>
*/
#include <linux/export.h>
#include "drm_buffer.h"
/**
* Allocate the drm buffer object.
*
* buf: Pointer to a pointer where the object is stored.
* size: The number of bytes to allocate.
*/
int drm_buffer_alloc(struct drm_buffer **buf, int size)
{
	int nr_pages = size / PAGE_SIZE + 1;
	int idx;

	/* Allocating pointer table to end of structure makes drm_buffer
	 * variable sized */
	*buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
			GFP_KERNEL);

	if (*buf == NULL) {
		DRM_ERROR("Failed to allocate drm buffer object to hold"
				" %d bytes in %d pages.\n",
				size, nr_pages);
		return -ENOMEM;
	}

	(*buf)->size = size;

	for (idx = 0; idx < nr_pages; ++idx) {

		(*buf)->data[idx] =
			kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
				GFP_KERNEL);

		if ((*buf)->data[idx] == NULL) {
			DRM_ERROR("Failed to allocate %dth page for drm"
					" buffer with %d bytes and %d pages.\n",
					idx + 1, size, nr_pages);
			goto error_out;
		}
	}

	return 0;

error_out:

	for (; idx >= 0; --idx)
		kfree((*buf)->data[idx]);

	kfree(*buf);
	return -ENOMEM;
}
/**
 * Copy the user data to the beginning of the buffer and reset the processing
 * iterator.
 *
 * user_data: A pointer to the data that is copied to the buffer.
 * size: The number of bytes to copy.
 */
int drm_buffer_copy_from_user(struct drm_buffer *buf,
		void __user *user_data, int size)
{
	int nr_pages = size / PAGE_SIZE + 1;
	int idx;

	if (size > buf->size) {
		DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
				" %d bytes space\n",
				size, buf->size);
		return -EFAULT;
	}

	for (idx = 0; idx < nr_pages; ++idx) {

		if (copy_from_user(buf->data[idx],
					user_data + idx * PAGE_SIZE,
					min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
			DRM_ERROR("Failed to copy user data (%p) to drm buffer"
					" (%p) %dth page.\n",
					user_data, buf, idx);
			return -EFAULT;
		}
	}

	buf->iterator = 0;
	return 0;
}
/**
 * Free the drm buffer object
 */
void drm_buffer_free(struct drm_buffer *buf)
{
	if (buf != NULL) {

		int nr_pages = buf->size / PAGE_SIZE + 1;
		int idx;
		for (idx = 0; idx < nr_pages; ++idx)
			kfree(buf->data[idx]);

		kfree(buf);
	}
}
/**
 * Read an object from the buffer that may be split across multiple pages. If
 * the object is not split, the function simply returns a pointer to the object
 * in the buffer. For a split object, the data is copied into the stack object
 * supplied by the caller.
 *
 * The processing location of the buffer is also advanced to the next byte
 * after the object.
 *
 * objsize: The size of the object in bytes.
 * stack_obj: A pointer to a memory location where the object can be copied.
 */
void *drm_buffer_read_object(struct drm_buffer *buf,
		int objsize, void *stack_obj)
{
	int idx = drm_buffer_index(buf);
	int page = drm_buffer_page(buf);
	void *obj = NULL;

	if (idx + objsize <= PAGE_SIZE) {
		obj = &buf->data[page][idx];
	} else {
		/* The object is split which forces copy to temporary object.*/
		int beginsz = PAGE_SIZE - idx;
		memcpy(stack_obj, &buf->data[page][idx], beginsz);

		memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
				objsize - beginsz);

		obj = stack_obj;
	}

	drm_buffer_advance(buf, objsize);
	return obj;
}
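
For context on what is being deleted: a minimal sketch of how a legacy command-stream path would typically drive this multipart-buffer API. The struct demo_cmd layout and the demo_parse_cmds() wrapper are hypothetical illustrations; only the drm_buffer_* calls come from the file above.

#include <linux/types.h>

#include "drm_buffer.h"

struct demo_cmd {				/* hypothetical packet layout */
	u32 header;
	u32 value;
};

static int demo_parse_cmds(void __user *cmds, int size)
{
	struct drm_buffer *buf;
	struct demo_cmd stack_cmd;		/* scratch for page-split reads */
	int ret;

	ret = drm_buffer_alloc(&buf, size);	/* one kmalloc'd chunk per page */
	if (ret)
		return ret;

	ret = drm_buffer_copy_from_user(buf, cmds, size);
	if (ret)
		goto out;

	while (drm_buffer_unprocessed(buf) >= (int)sizeof(stack_cmd)) {
		/* Returns a pointer into the buffer, or into stack_cmd when
		 * the object straddles a page boundary; the iterator is
		 * advanced past the object either way. */
		struct demo_cmd *cmd =
			drm_buffer_read_object(buf, sizeof(stack_cmd), &stack_cmd);

		/* ... validate cmd->header and cmd->value here ... */
		(void)cmd;
	}

out:
	drm_buffer_free(buf);
	return ret;
}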
/**************************************************************************
*
* Copyright 2010 Pauli Nieminen.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
 * Multipart buffer for copying data which is larger than the page size.
*
* Authors:
* Pauli Nieminen <suokkos-at-gmail-dot-com>
*/
#ifndef _DRM_BUFFER_H_
#define _DRM_BUFFER_H_
#include <drm/drmP.h>
struct drm_buffer {
	int iterator;
	int size;
	char *data[];
};
/**
 * Return the index of the page that the buffer is currently pointing at.
 */
static inline int drm_buffer_page(struct drm_buffer *buf)
{
	return buf->iterator / PAGE_SIZE;
}

/**
 * Return the index of the current byte in the page.
 */
static inline int drm_buffer_index(struct drm_buffer *buf)
{
	return buf->iterator & (PAGE_SIZE - 1);
}

/**
 * Return the number of bytes that are left to process.
 */
static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
{
	return buf->size - buf->iterator;
}

/**
 * Advance the buffer iterator by the given number of bytes.
 */
static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
{
	buf->iterator += bytes;
}
/**
* Allocate the drm buffer object.
*
* buf: A pointer to a pointer where the object is stored.
* size: The number of bytes to allocate.
*/
extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
/**
 * Copy the user data to the beginning of the buffer and reset the processing
 * iterator.
 *
 * user_data: A pointer to the data that is copied to the buffer.
 * size: The number of bytes to copy.
 */
extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
		void __user *user_data, int size);
/**
* Free the drm buffer object
*/
extern void drm_buffer_free(struct drm_buffer *buf);
/**
 * Read an object from the buffer that may be split across multiple pages. If
 * the object is not split, the function simply returns a pointer to the object
 * in the buffer. For a split object, the data is copied into the stack object
 * supplied by the caller.
 *
 * The processing location of the buffer is also advanced to the next byte
 * after the object.
 *
 * objsize: The size of the object in bytes.
 * stack_obj: A pointer to a memory location where the object can be copied.
 */
extern void *drm_buffer_read_object(struct drm_buffer *buf,
		int objsize, void *stack_obj);
/**
 * Returns the pointer to the dword which is offset number of elements from the
 * current processing location.
 *
 * The caller must make sure that the dword is not split in the buffer. This
 * requirement is easily met if all the sizes of objects in the buffer are
 * multiples of the dword size and PAGE_SIZE is a multiple of the dword size.
 *
 * A call to this function doesn't change the processing location.
 *
 * offset: The index of the dword relative to the internal iterator.
 */
static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
		int offset)
{
	int iter = buffer->iterator + offset * 4;
	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
}
/**
 * Returns the pointer to the byte which is offset number of bytes from the
 * current processing location.
 *
 * A call to this function doesn't change the processing location.
 *
 * offset: The index of the byte relative to the internal iterator.
 */
static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
		int offset)
{
	int iter = buffer->iterator + offset;
	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
}
#endif
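
A short illustrative sketch of the peek-without-advancing pattern the dword accessor above was intended for; demo_packet_is_type3() is a hypothetical helper, and the PM4-style header decode is only an assumption here, not taken from this diff.

#include <linux/types.h>

#include "drm_buffer.h"

/* Peek at the dword under the current iterator without moving it; the
 * caller guarantees dword alignment so the dword is never page-split. */
static bool demo_packet_is_type3(struct drm_buffer *buf)
{
	u32 header = *(u32 *)drm_buffer_pointer_to_dword(buf, 0);

	/* Assumed PM4-style layout: the packet type lives in bits 31:30,
	 * and type 3 is the variable-length command packet. */
	return (header >> 30) == 3;
}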
@@ -2328,101 +2328,6 @@ int r600_cs_parse(struct radeon_cs_parser *p)
	return 0;
}
#ifdef CONFIG_DRM_RADEON_UMS
/**
 * cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the memory
 * used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs == NULL) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.track = track;
	parser.ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}

	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}

	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = parser.chunk_ib;
	parser.ib.length_dw = ib_chunk->length_dw;
	*l = parser.ib.length_dw;
	if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
		r = -EFAULT;
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}

void r600_cs_legacy_init(void)
{
	r600_nomm = 1;
}
#endif
/*
 * DMA
 */
@@ -291,88 +291,6 @@ static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#ifdef CONFIG_DRM_RADEON_UMS
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		return 0;

	/* Disable *all* interrupts */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
	return 0;
}

static int radeon_resume(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		return 0;

	/* Restore interrupt registers */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
	return 0;
}

static const struct file_operations radeon_driver_old_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_legacy_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = radeon_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver_old = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
	.load = radeon_driver_load,
	.firstopen = radeon_driver_firstopen,
	.open = radeon_driver_open,
	.preclose = radeon_driver_preclose,
	.postclose = radeon_driver_postclose,
	.lastclose = radeon_driver_lastclose,
	.set_busid = drm_pci_set_busid,
	.unload = radeon_driver_unload,
	.suspend = radeon_suspend,
	.resume = radeon_resume,
	.get_vblank_counter = radeon_get_vblank_counter,
	.enable_vblank = radeon_enable_vblank,
	.disable_vblank = radeon_disable_vblank,
	.master_create = radeon_master_create,
	.master_destroy = radeon_master_destroy,
	.irq_preinstall = radeon_driver_irq_preinstall,
	.irq_postinstall = radeon_driver_irq_postinstall,
	.irq_uninstall = radeon_driver_irq_uninstall,
	.irq_handler = radeon_driver_irq_handler,
	.ioctls = radeon_ioctls,
	.dma_ioctl = radeon_cp_buffers,
	.fops = &radeon_driver_old_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
#endif
static struct drm_driver kms_driver;

static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -619,13 +537,6 @@ static struct drm_driver kms_driver = {
static struct drm_driver *driver;
static struct pci_driver *pdriver;
#ifdef CONFIG_DRM_RADEON_UMS
static struct pci_driver radeon_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
};
#endif
static struct pci_driver radeon_kms_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
@@ -655,16 +566,8 @@ static int __init radeon_init(void)
		radeon_register_atpx_handler();
	} else {
#ifdef CONFIG_DRM_RADEON_UMS
		DRM_INFO("radeon userspace modesetting enabled.\n");
		driver = &driver_old;
		pdriver = &radeon_pci_driver;
		driver->driver_features &= ~DRIVER_MODESET;
		driver->num_ioctls = radeon_max_ioctl;
#else
		DRM_ERROR("No UMS support in radeon module!\n");
		return -EINVAL;
#endif
	}

	radeon_kfd_init();