Commit d27c9b54 authored by Dave Airlie, committed by Dave Airlie

drm: remove version.h and any version checks..

This patch removes all of the kernel-version conditionals from the in-kernel DRM tree.
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent aa0ca6b4
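For context, a minimal sketch (not part of the patch) of the idiom being deleted throughout: compile-time branching on the running kernel via <linux/version.h>. The names are taken from the drm_vm.c hunks below, where the nopage() handler needed different signatures on 2.4 and 2.6 kernels; drm_do_vm_nopage() is the DRM helper those wrappers call.

/* Illustrative sketch of the removed compatibility pattern,
 * reconstructed from the drm_vm.c hunks in this commit.
 */
#include <linux/version.h>
#include <linux/mm.h>

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
/* 2.6-style nopage(): third argument is an int *type out-parameter. */
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	return drm_do_vm_nopage(vma, address);
}
#else
/* 2.4-style nopage(): third argument is present but unused. */
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int unused)
{
	return drm_do_vm_nopage(vma, address);
}
#endif

With the minimum supported kernel raised, only the first branch survives, and the #if/#else scaffolding is dropped file by file below.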
@@ -53,7 +53,6 @@
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/jiffies.h>
 #include <linux/smp_lock.h>	/* For (un)lock_kernel */
 #include <linux/mm.h>
@@ -161,36 +160,7 @@
 #define pte_unmap(pte)
 #endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
-static inline struct page * vmalloc_to_page(void * vmalloc_addr)
-{
-	unsigned long addr = (unsigned long) vmalloc_addr;
-	struct page *page = NULL;
-	pgd_t *pgd = pgd_offset_k(addr);
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-
-	if (!pgd_none(*pgd)) {
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd_none(*pmd)) {
-			preempt_disable();
-			ptep = pte_offset_map(pmd, addr);
-			pte = *ptep;
-			if (pte_present(pte))
-				page = pte_page(pte);
-			pte_unmap(ptep);
-			preempt_enable();
-		}
-	}
-	return page;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#define DRM_RPR_ARG(vma)
-#else
 #define DRM_RPR_ARG(vma) vma,
-#endif

 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
@@ -746,11 +716,7 @@ typedef struct drm_device {
 	int		pci_slot;	/**< PCI slot number */
 	int		pci_func;	/**< PCI function number */
 #ifdef __alpha__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
-	struct pci_controler *hose;
-#else
 	struct pci_controller *hose;
-#endif
 #endif
 	drm_sg_mem_t	*sg;		/**< Scatter gather memory */
 	unsigned long	*ctx_bitmap;	/**< context bitmap */
...
@@ -1499,34 +1499,18 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
 			goto done;
 		}
-#if LINUX_VERSION_CODE <= 0x020402
-		down( &current->mm->mmap_sem );
-#else
 		down_write( &current->mm->mmap_sem );
-#endif
 		virtual = do_mmap( filp, 0, map->size,
 				   PROT_READ | PROT_WRITE,
 				   MAP_SHARED,
 				   token );
-#if LINUX_VERSION_CODE <= 0x020402
-		up( &current->mm->mmap_sem );
-#else
 		up_write( &current->mm->mmap_sem );
-#endif
 	} else {
-#if LINUX_VERSION_CODE <= 0x020402
-		down( &current->mm->mmap_sem );
-#else
 		down_write( &current->mm->mmap_sem );
-#endif
 		virtual = do_mmap( filp, 0, dma->byte_count,
 				   PROT_READ | PROT_WRITE,
 				   MAP_SHARED, 0 );
-#if LINUX_VERSION_CODE <= 0x020402
-		up( &current->mm->mmap_sem );
-#else
 		up_write( &current->mm->mmap_sem );
-#endif
 	}
 	if ( virtual > -1024UL ) {
 		/* Real error */
...
@@ -314,8 +314,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 }
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
 static struct page *drm_vm_nopage(struct vm_area_struct *vma,
 				  unsigned long address,
 				  int *type) {
@@ -344,35 +342,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
 	return drm_do_vm_sg_nopage(vma, address);
 }
-#else	/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				  unsigned long address,
-				  int unused) {
-	return drm_do_vm_nopage(vma, address);
-}
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_shm_nopage(vma, address);
-}
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_dma_nopage(vma, address);
-}
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				     unsigned long address,
-				     int unused) {
-	return drm_do_vm_sg_nopage(vma, address);
-}
-#endif
 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
 	.nopage = drm_vm_nopage,
@@ -496,11 +465,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_ops = &drm_vm_dma_ops;
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#else
 	vma->vm_flags |= VM_RESERVED; /* Don't swap */
-#endif
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
@@ -660,29 +625,17 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		vma->vm_private_data = (void *)map;
 		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
 		vma->vm_flags |= VM_RESERVED;
-#endif
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
 		vma->vm_private_data = (void *)map;
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
 		vma->vm_flags |= VM_RESERVED;
-#endif
 		break;
 	default:
 		return -EINVAL;	/* This should never happen. */
 	}
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#else
 	vma->vm_flags |= VM_RESERVED; /* Don't swap */
-#endif
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
...
@@ -45,11 +45,6 @@
 #define I810_BUF_UNMAPPED 0
 #define I810_BUF_MAPPED   1
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
-#define down_write down
-#define up_write up
-#endif
 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
 {
 	drm_device_dma_t *dma = dev->dma;
...
@@ -47,11 +47,6 @@
 #define I830_BUF_UNMAPPED 0
 #define I830_BUF_MAPPED   1
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
-#define down_write down
-#define up_write up
-#endif
 static drm_buf_t *i830_freelist_get(drm_device_t *dev)
 {
 	drm_device_dma_t *dma = dev->dma;
...
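A note on the i810/i830 hunks above: the #define down_write down / #define up_write up fallbacks existed because the version checks treat 2.4.3 as the first kernel where current->mm->mmap_sem is an rw_semaphore; on older kernels only the plain down()/up() semaphore API was available. With the checks gone, callers use the rw-semaphore API directly, as in the drm_mapbufs() hunk earlier. A sketch of the resulting fragment (variables filp, map, token, and virtual come from the surrounding drm_mapbufs() code, so this is not standalone):

	/* Take the write side of mmap_sem around the mapping call,
	 * instead of the pre-2.4.3 down()/up() pair. */
	down_write(&current->mm->mmap_sem);
	virtual = do_mmap(filp, 0, map->size,
			  PROT_READ | PROT_WRITE, MAP_SHARED, token);
	up_write(&current->mm->mmap_sem);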