Commit 00a5bfb7 authored by Linus Torvalds

Update direct-rendering to current DRI CVS tree.

This adds support for i830 interrupt handling, and new, improved
lock context keying: the DRM lock and DMA buffers are now keyed on the
owning struct file rather than on the holder's PID. See the per-file
comments for more detail, as this commit sadly mixes up a few different
things (that's what you get for not tracking the changes at a fine
enough granularity).
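The lock-keying change shows up throughout the diff as pid_t fields becoming struct file * fields, with a new shared LOCK_TEST_WITH_RETURN() macro in drmP.h checking ownership against the caller's file pointer. A minimal sketch of how a driver ioctl would use it (the ioctl name and body are hypothetical; the macro itself is taken from the drmP.h hunk below):

    /* Hypothetical driver ioctl: verify the caller holds the hardware lock
     * before touching the ring.  LOCK_TEST_WITH_RETURN() compares the lock
     * owner against the caller's struct file *, not a PID, so ownership
     * follows the open file descriptor and can be reclaimed reliably when
     * that file is released.
     */
    int example_dma_flush(struct inode *inode, struct file *filp,
                          unsigned int cmd, unsigned long arg)
    {
            drm_file_t   *priv = filp->private_data;
            drm_device_t *dev  = priv->dev;

            LOCK_TEST_WITH_RETURN(dev, filp);  /* returns -EINVAL if not held by filp */

            /* ... emit commands to the hardware ... */
            return 0;
    }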
parent c03239b7
@@ -22,7 +22,13 @@ config DRM_TDFX
	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
	  graphics card.  If M is selected, the module will be called tdfx.
-#  tristate '  3dlabs GMX 2000' CONFIG_DRM_GAMMA
+config DRM_GAMMA
+	tristate "3dlabs GMX 2000"
+	depends on DRM && n
+	help
+	  This is the old gamma driver, disabled for now unless somebody
+	  tells me it actually might work.
config DRM_R128
	tristate "ATI Rage 128"
	depends on DRM
@@ -60,4 +66,3 @@ config DRM_MGA
	  Choose this option if you have a Matrox G200, G400 or G450 graphics
	  card.  If M is selected, the module will be called mga.  AGP
	  support is required for this driver to work.
@@ -7,7 +7,7 @@ tdfx-objs   := tdfx_drv.o
r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs   := i810_drv.o i810_dma.o
-i830-objs   := i830_drv.o i830_dma.o
+i830-objs   := i830_drv.o i830_dma.o i830_irq.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
ffb-objs    := ffb_drv.o ffb_context.o
......
@@ -254,6 +254,7 @@ do {								\
		}						\
	}							\
} while(0)
+#define DRM_DROP_MAP(_map)
/* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
@@ -268,6 +269,17 @@ do {								\
	(_map) = (_dev)->context_sareas[_ctx];			\
} while(0)
+#define LOCK_TEST_WITH_RETURN( dev, filp )			\
+do {								\
+	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||	\
+	     dev->lock.filp != filp ) {				\
+		DRM_ERROR( "%s called without lock held\n",	\
+			   __FUNCTION__ );			\
+		return -EINVAL;					\
+	}							\
+} while (0)
typedef int drm_ioctl_t( struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg );
@@ -316,7 +328,7 @@ typedef struct drm_buf {
	__volatile__ int  waiting;	/* On kernel DMA queue		*/
	__volatile__ int  pending;	/* On hardware DMA queue	*/
	wait_queue_head_t dma_wait;	/* Processes waiting		*/
-	pid_t		  pid;		/* PID of holding process	*/
+	struct file	  *filp;	/* Pointer to holding file descr */
	int		  context;	/* Kernel queue for this buffer	*/
	int		  while_locked;	/* Dispatch this buffer while locked */
	enum {
@@ -434,7 +446,7 @@ typedef struct drm_queue {
typedef struct drm_lock_data {
	drm_hw_lock_t	  *hw_lock;	/* Hardware lock		*/
-	pid_t		  pid;		/* PID of lock holder (0=kernel) */
+	struct file	  *filp;	/* File descr of lock holder (0=kernel) */
	wait_queue_head_t lock_queue;	/* Queue of blocked processes	*/
	unsigned long	  lock_time;	/* Time of last lock in jiffies	*/
} drm_lock_data_t;
@@ -516,6 +528,8 @@ typedef struct drm_map_list {
	drm_map_t	*map;
} drm_map_list_t;
+typedef drm_map_t drm_local_map_t;
#if __HAVE_VBL_IRQ
typedef struct drm_vbl_sig {
@@ -591,6 +605,7 @@ typedef struct drm_device {
	atomic_t	  vbl_received;
	spinlock_t	  vbl_lock;
	drm_vbl_sig_t	  vbl_sigs;
+	unsigned int	  vbl_pending;
#endif
	cycles_t	  ctx_start;
	cycles_t	  lck_start;
@@ -807,15 +822,15 @@ extern int DRM(mapbufs)( struct inode *inode, struct file *filp,
extern int	     DRM(dma_setup)(drm_device_t *dev);
extern void	     DRM(dma_takedown)(drm_device_t *dev);
extern void	     DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
-extern void	     DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid);
+extern void	     DRM(reclaim_buffers)( struct file *filp );
#if __HAVE_OLD_DMA
/* GH: This is a dirty hack for now...
 */
extern void	     DRM(clear_next_buffer)(drm_device_t *dev);
extern int	     DRM(select_queue)(drm_device_t *dev,
				       void (*wrapper)(unsigned long));
-extern int	     DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *dma);
-extern int	     DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma);
+extern int	     DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
+extern int	     DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
#endif
#if __HAVE_DMA_IRQ
extern int	     DRM(control)( struct inode *inode, struct file *filp,
......
@@ -266,12 +266,12 @@ drm_agp_head_t *DRM(agp_init)(void)
		head->cant_use_aperture = head->agp_info.cant_use_aperture;
		head->page_mask = head->agp_info.page_mask;
#endif
-		DRM_DEBUG("AGP %d.%d, aperture @ 0x%08lx %ZuMB\n",
+		DRM_INFO("AGP %d.%d aperture @ 0x%08lx %ZuMB\n",
			 head->agp_info.version.major,
			 head->agp_info.version.minor,
			 head->agp_info.aper_base,
			 head->agp_info.aper_size);
	}
	return head;
}
......
@@ -403,7 +403,7 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
-		buf->pid = 0;
+		buf->filp = 0;
		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
@@ -616,7 +616,7 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
-		buf->pid = 0;
+		buf->filp = 0;
#if __HAVE_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
@@ -773,7 +773,7 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
-		buf->pid = 0;
+		buf->filp = 0;
		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
@@ -1011,9 +1011,9 @@ int DRM(freebufs)( struct inode *inode, struct file *filp,
			return -EINVAL;
		}
		buf = dma->buflist[idx];
-		if ( buf->pid != current->pid ) {
-			DRM_ERROR( "Process %d freeing buffer owned by %d\n",
-				   current->pid, buf->pid );
+		if ( buf->filp != filp ) {
+			DRM_ERROR( "Process %d freeing buffer not owned\n",
+				   current->pid );
			return -EINVAL;
		}
		DRM(free_buffer)( dev, buf );
......
@@ -188,7 +188,7 @@ void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
	buf->waiting  = 0;
	buf->pending  = 0;
-	buf->pid      = 0;
+	buf->filp     = 0;
	buf->used     = 0;
#if __HAVE_DMA_HISTOGRAM
	buf->time_completed = get_cycles();
@@ -210,14 +210,16 @@ void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
}
#if !__HAVE_DMA_RECLAIM
-void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid)
+void DRM(reclaim_buffers)( struct file *filp )
{
+	drm_file_t    *priv = filp->private_data;
+	drm_device_t  *dev  = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	int		 i;
	if (!dma) return;
	for (i = 0; i < dma->buf_count; i++) {
-		if (dma->buflist[i]->pid == pid) {
+		if (dma->buflist[i]->filp == filp) {
			switch (dma->buflist[i]->list) {
			case DRM_LIST_NONE:
				DRM(free_buffer)(dev, dma->buflist[i]);
@@ -318,8 +320,10 @@ int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
}
-int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
+int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
{
+	drm_file_t    *priv = filp->private_data;
+	drm_device_t  *dev  = priv->dev;
	int		  i;
	drm_queue_t	  *q;
	drm_buf_t	  *buf;
@@ -381,10 +385,10 @@ int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
			return -EINVAL;
		}
		buf = dma->buflist[ idx ];
-		if (buf->pid != current->pid) {
+		if (buf->filp != filp) {
			atomic_dec(&q->use_count);
-			DRM_ERROR("Process %d using buffer owned by %d\n",
-				  current->pid, buf->pid);
+			DRM_ERROR("Process %d using buffer not owned\n",
+				  current->pid);
			return -EINVAL;
		}
		if (buf->list != DRM_LIST_NONE) {
@@ -426,9 +430,11 @@ int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
	return 0;
}
-static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
+static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
					  int order)
{
+	drm_file_t    *priv = filp->private_data;
+	drm_device_t  *dev  = priv->dev;
	int		  i;
	drm_buf_t	  *buf;
	drm_device_dma_t  *dma = dev->dma;
@@ -438,13 +444,13 @@ static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
					       d->flags & _DRM_DMA_WAIT);
		if (!buf) break;
		if (buf->pending || buf->waiting) {
-			DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
+			DRM_ERROR("Free buffer %d in use by %x (w%d, p%d)\n",
				  buf->idx,
-				  buf->pid,
+				  buf->filp,
				  buf->waiting,
				  buf->pending);
		}
-		buf->pid = current->pid;
+		buf->filp = filp;
		if (copy_to_user(&d->request_indices[i],
				 &buf->idx,
				 sizeof(buf->idx)))
@@ -461,7 +467,7 @@ static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
}
-int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
+int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
{
	int		  order;
	int		  retcode = 0;
@@ -470,7 +476,7 @@ int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
	order = DRM(order)(dma->request_size);
	dma->granted_count = 0;
-	retcode = DRM(dma_get_buffers_of_order)(dev, dma, order);
+	retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);
	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
@@ -480,7 +486,7 @@ int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
		     && tmp_order >= DRM_MIN_ORDER;
		     --tmp_order) {
-			retcode = DRM(dma_get_buffers_of_order)(dev, dma,
+			retcode = DRM(dma_get_buffers_of_order)(filp, dma,
								tmp_order);
		}
	}
@@ -493,7 +499,7 @@ int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
		     && tmp_order <= DRM_MAX_ORDER;
		     ++tmp_order) {
-			retcode = DRM(dma_get_buffers_of_order)(dev, dma,
+			retcode = DRM(dma_get_buffers_of_order)(filp, dma,
								tmp_order);
		}
	}
@@ -540,6 +546,8 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
	spin_lock_init( &dev->vbl_lock );
	INIT_LIST_HEAD( &dev->vbl_sigs.head );
+	dev->vbl_pending = 0;
#endif
				/* Before installing handler */
@@ -622,6 +630,7 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
	switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
	case _DRM_VBLANK_RELATIVE:
		vblwait.request.sequence += atomic_read( &dev->vbl_received );
+		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
@@ -632,10 +641,38 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
	if ( flags & _DRM_VBLANK_SIGNAL ) {
		unsigned long irqflags;
-		drm_vbl_sig_t *vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) );
+		drm_vbl_sig_t *vbl_sig;
+		vblwait.reply.sequence = atomic_read( &dev->vbl_received );
+		spin_lock_irqsave( &dev->vbl_lock, irqflags );
+		/* Check if this task has already scheduled the same signal
+		 * for the same vblank sequence number; nothing to be done in
+		 * that case
+		 */
+		list_for_each( ( (struct list_head *) vbl_sig ), &dev->vbl_sigs.head ) {
+			if (vbl_sig->sequence == vblwait.request.sequence
+			    && vbl_sig->info.si_signo == vblwait.request.signal
+			    && vbl_sig->task == current)
+			{
+				spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+				goto done;
+			}
+		}
+		if ( dev->vbl_pending >= 100 ) {
+			spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+			return -EBUSY;
+		}
+		dev->vbl_pending++;
-		if ( !vbl_sig )
+		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+		if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
			return -ENOMEM;
+		}
		memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
@@ -643,9 +680,6 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
		vbl_sig->info.si_signo = vblwait.request.signal;
		vbl_sig->task = current;
-		vblwait.reply.sequence = atomic_read( &dev->vbl_received );
-		/* Hook signal entry into list */
		spin_lock_irqsave( &dev->vbl_lock, irqflags );
		list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
@@ -659,6 +693,7 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
		vblwait.reply.tval_usec = now.tv_usec;
	}
+done:
	DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
				sizeof(vblwait) );
@@ -667,25 +702,23 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
void DRM(vbl_send_signals)( drm_device_t *dev )
{
-	struct list_head *entry, *tmp;
+	struct list_head *tmp;
	drm_vbl_sig_t *vbl_sig;
	unsigned int vbl_seq = atomic_read( &dev->vbl_received );
	unsigned long flags;
	spin_lock_irqsave( &dev->vbl_lock, flags );
-	list_for_each_safe( entry, tmp, &dev->vbl_sigs.head ) {
-		vbl_sig = (drm_vbl_sig_t *) entry;
+	list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp, &dev->vbl_sigs.head ) {
		if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
-			vbl_sig->info.si_code = atomic_read( &dev->vbl_received );
+			vbl_sig->info.si_code = vbl_seq;
			send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
-			list_del( entry );
-			DRM_FREE( entry );
+			list_del( (struct list_head *) vbl_sig );
+			DRM_FREE( vbl_sig, sizeof(*vbl_sig) );
+			dev->vbl_pending--;
		}
	}
......
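The new signal path caps outstanding vblank signals per device at 100 (vbl_pending) and silently accepts duplicate requests from the same task for the same sequence and signal. A minimal userspace-style sketch of requesting such a signal, assuming the 2002-era drm_wait_vblank ioctl layout used above (header path, error handling and the choice of SIGUSR1 are illustrative):

    #include <signal.h>
    #include <sys/ioctl.h>
    #include "drm.h"      /* drm_wait_vblank_t, DRM_IOCTL_WAIT_VBLANK (DRI headers of the era) */

    /* Ask the DRM to deliver SIGUSR1 once 10 more vblanks have passed.
     * _DRM_VBLANK_RELATIVE is converted to an absolute sequence in the
     * kernel (the += dev->vbl_received above); a duplicate request for
     * the same task/sequence/signal just returns the current count.
     */
    static int request_vblank_signal(int drm_fd)
    {
            drm_wait_vblank_t vbl;

            vbl.request.type     = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_SIGNAL;
            vbl.request.sequence = 10;
            vbl.request.signal   = SIGUSR1;

            if (ioctl(drm_fd, DRM_IOCTL_WAIT_VBLANK, &vbl) < 0)
                    return -1;          /* -EBUSY once 100 signals are pending */

            return vbl.reply.sequence;  /* vblank count at the time of the request */
    }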
@@ -323,6 +323,8 @@ static int DRM(setup)( drm_device_t *dev )
	dev->last_context = 0;
	dev->last_switch = 0;
	dev->last_checked = 0;
+	init_timer( &dev->timer );
+	init_waitqueue_head( &dev->context_wait );
	dev->ctx_start = 0;
	dev->lck_start = 0;
@@ -494,7 +496,7 @@ static int DRM(takedown)( drm_device_t *dev )
#endif
	if ( dev->lock.hw_lock ) {
		dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
-		dev->lock.pid = 0;
+		dev->lock.filp = 0;
		wake_up_interruptible( &dev->lock.lock_queue );
	}
	up( &dev->struct_sem );
@@ -576,13 +578,9 @@ static int __init drm_init( void )
		memset( (void *)dev, 0, sizeof(*dev) );
		dev->count_lock = SPIN_LOCK_UNLOCKED;
		sema_init( &dev->struct_sem, 1 );
-		init_timer( &dev->timer );
-		init_waitqueue_head( &dev->context_wait );
-		if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0) {
-			retcode = -EPERM;
-			goto fail_reg;
-		}
+		if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
+			return -EPERM;
		dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] );
		dev->name   = DRIVER_NAME;
@@ -591,8 +589,9 @@ static int __init drm_init( void )
#if __MUST_HAVE_AGP
		if ( dev->agp == NULL ) {
			DRM_ERROR( "Cannot initialize the agpgart module.\n" );
-			retcode = -ENOMEM;
-			goto fail;
+			DRM(stub_unregister)(DRM(minor)[i]);
+			DRM(takedown)( dev );
+			return -ENOMEM;
		}
#endif
#if __REALLY_HAVE_MTRR
@@ -608,7 +607,9 @@ static int __init drm_init( void )
		retcode = DRM(ctxbitmap_init)( dev );
		if( retcode ) {
			DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
-			goto fail;
+			DRM(stub_unregister)(DRM(minor)[i]);
+			DRM(takedown)( dev );
+			return retcode;
		}
#endif
	DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
@@ -623,17 +624,6 @@ static int __init drm_init( void )
	DRIVER_POSTINIT();
	return 0;
-#if (__REALLY_HAVE_AGP && __MUST_HAVE_AGP) || __HAVE_CTX_BITMAP
- fail:
-	DRM(stub_unregister)(DRM(minor)[i]);
-	DRM(takedown)( dev );
-#endif
- fail_reg:
-	kfree (DRM(device));
-	kfree (DRM(minor));
-	return retcode;
}
/* drm_cleanup is called via cleanup_module at module unload time.
@@ -740,8 +730,6 @@ int DRM(open)( struct inode *inode, struct file *filp )
		return -ENODEV;
	}
-	DRM_DEBUG( "open_count = %d\n", dev->open_count );
	retcode = DRM(open_helper)( inode, filp, dev );
	if ( !retcode ) {
		atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
@@ -773,15 +761,15 @@ int DRM(release)( struct inode *inode, struct file *filp )
	 * Begin inline drm_release
	 */
-	DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n",
-		   current->pid, dev->device, dev->open_count );
+	DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
+		   current->pid, (long)dev->device, dev->open_count );
	if ( dev->lock.hw_lock &&
	     _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
-	     dev->lock.pid == current->pid ) {
-		DRM_DEBUG( "Process %d dead, freeing lock for context %d\n",
-			   current->pid,
+	     dev->lock.filp == filp ) {
+		DRM_DEBUG( "File %p released, freeing lock for context %d\n",
+			   filp,
			   _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
#if __HAVE_RELEASE
		DRIVER_RELEASE();
#endif
@@ -797,6 +785,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
	else if ( dev->lock.hw_lock ) {
		/* The lock is required to reclaim buffers */
		DECLARE_WAITQUEUE( entry, current );
		add_wait_queue( &dev->lock.lock_queue, &entry );
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
@@ -807,7 +796,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
			}
			if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
					     DRM_KERNEL_CONTEXT ) ) {
-				dev->lock.pid       = priv->pid;
+				dev->lock.filp      = filp;
				dev->lock.lock_time = jiffies;
				atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
				break;	/* Got lock */
@@ -831,7 +820,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
		}
	}
#elif __HAVE_DMA
-	DRM(reclaim_buffers)( dev, priv->pid );
+	DRM(reclaim_buffers)( filp );
#endif
	DRM(fasync)( -1, filp, 0 );
@@ -855,7 +844,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
			dev->file_last = priv->prev;
		}
	up( &dev->struct_sem );
	DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
	/* ========================================================
@@ -880,6 +869,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
	spin_unlock( &dev->count_lock );
	unlock_kernel();
	return retcode;
}
@@ -899,8 +889,9 @@ int DRM(ioctl)( struct inode *inode, struct file *filp,
	atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
	++priv->ioctl_count;
-	DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n",
-		   current->pid, cmd, nr, dev->device, priv->authenticated );
+	DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+		   current->pid, cmd, nr, (long)dev->device,
+		   priv->authenticated );
	if ( nr >= DRIVER_IOCTL_COUNT ) {
		retcode = -EINVAL;
@@ -976,7 +967,7 @@ int DRM(lock)( struct inode *inode, struct file *filp,
		}
		if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
				     lock.context ) ) {
-			dev->lock.pid       = current->pid;
+			dev->lock.filp      = filp;
			dev->lock.lock_time = jiffies;
			atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
			break;	/* Got lock */
@@ -1058,7 +1049,7 @@ int DRM(unlock)( struct inode *inode, struct file *filp,
	 * agent to request it then we should just be able to
	 * take it immediately and not eat the ioctl.
	 */
-	dev->lock.pid = 0;
+	dev->lock.filp = 0;
	{
		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
		unsigned int old, new, prev, ctx;
......
@@ -94,8 +94,8 @@ int DRM(flush)(struct file *filp)
	drm_file_t    *priv = filp->private_data;
	drm_device_t  *dev  = priv->dev;
-	DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n",
-		  current->pid, dev->device, dev->open_count);
+	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+		  current->pid, (long)dev->device, dev->open_count);
	return 0;
}
@@ -105,7 +105,7 @@ int DRM(fasync)(int fd, struct file *filp, int on)
	drm_device_t  *dev  = priv->dev;
	int	      retcode;
-	DRM_DEBUG("fd = %d, device = 0x%x\n", fd, dev->device);
+	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, (long)dev->device);
	retcode = fasync_helper(fd, filp, on, &dev->buf_async);
	if (retcode < 0) return retcode;
	return 0;
......
@@ -40,6 +40,28 @@ int DRM(irq_busid)(struct inode *inode, struct file *filp,
	if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
		return -EFAULT;
+#ifdef __alpha__
+	{
+		int domain = p.busnum >> 8;
+		p.busnum &= 0xff;
+		/*
+		 * Find the hose the device is on (the domain number is the
+		 * hose index) and offset the bus by the root bus of that
+		 * hose.
+		 */
+		for(dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,NULL);
+		    dev;
+		    dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,dev)) {
+			struct pci_controller *hose = dev->sysdata;
+			if (hose->index == domain) {
+				p.busnum += hose->bus->number;
+				break;
+			}
+		}
+	}
+#endif
	dev = pci_find_slot(p.busnum, PCI_DEVFN(p.devnum, p.funcnum));
	if (!dev) {
		DRM_ERROR("pci_find_slot failed for %d:%d:%d\n",
@@ -112,7 +134,7 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
	do {
		struct pci_dev *pci_dev;
-		int b, d, f;
+		int domain, b, d, f;
		char *p;
		for(p = dev->unique; p && *p && *p != ':'; p++);
@@ -124,6 +146,27 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
		f = (int)simple_strtoul(p+1, &p, 10);
		if (*p) break;
+		domain = b >> 8;
+		b &= 0xff;
+#ifdef __alpha__
+		/*
+		 * Find the hose the device is on (the domain number is the
+		 * hose index) and offset the bus by the root bus of that
+		 * hose.
+		 */
+		for(pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,NULL);
+		    pci_dev;
+		    pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,pci_dev)) {
+			struct pci_controller *hose = pci_dev->sysdata;
+			if (hose->index == domain) {
+				b += hose->bus->number;
+				break;
+			}
+		}
+#endif
		pci_dev = pci_find_slot(b, PCI_DEVFN(d,f));
		if (pci_dev) {
			dev->pdev = pci_dev;
......
@@ -72,8 +72,8 @@ int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
	left = DRM_LEFTCOUNT(bl);
	if (!left) {
-		DRM_ERROR("Overflow while adding buffer %d from pid %d\n",
-			  buf->idx, buf->pid);
+		DRM_ERROR("Overflow while adding buffer %d from filp %p\n",
+			  buf->idx, buf->filp);
		return -EINVAL;
	}
#if __HAVE_DMA_HISTOGRAM
......
@@ -78,7 +78,7 @@ int DRM(lock_transfer)(drm_device_t *dev,
{
	unsigned int old, new, prev;
-	dev->lock.pid = 0;
+	dev->lock.filp = 0;
	do {
		old = *lock;
		new = context | _DRM_LOCK_HELD;
@@ -91,19 +91,17 @@ int DRM(lock_free)(drm_device_t *dev,
		  __volatile__ unsigned int *lock, unsigned int context)
{
	unsigned int old, new, prev;
-	pid_t        pid = dev->lock.pid;
-	dev->lock.pid = 0;
+	dev->lock.filp = 0;
	do {
		old  = *lock;
		new  = 0;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
-		DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n",
+		DRM_ERROR("%d freed heavyweight lock held by %d\n",
			  context,
-			  _DRM_LOCKING_CONTEXT(old),
-			  pid);
+			  _DRM_LOCKING_CONTEXT(old));
		return 1;
	}
	wake_up_interruptible(&dev->lock.lock_queue);
......
@@ -2,16 +2,17 @@
#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>
+#define DRMFILE struct file *
#define DRM_IOCTL_ARGS		struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data
#define DRM_ERR(d)		-(d)
#define DRM_CURRENTPID		current->pid
#define DRM_UDELAY(d)		udelay(d)
-#define DRM_READ8(addr)		readb(addr)
-#define DRM_READ32(addr)	readl(addr)
-#define DRM_WRITE8(addr, val)	writeb(val, addr)
-#define DRM_WRITE32(addr, val)	writel(val, addr)
-#define DRM_READMEMORYBARRIER()		mb()
-#define DRM_WRITEMEMORYBARRIER()	wmb()
+#define DRM_READ8(map, offset)		readb(((unsigned long)(map)->handle) + (offset))
+#define DRM_READ32(map, offset)		readl(((unsigned long)(map)->handle) + (offset))
+#define DRM_WRITE8(map, offset, val)	writeb(val, ((unsigned long)(map)->handle) + (offset))
+#define DRM_WRITE32(map, offset, val)	writel(val, ((unsigned long)(map)->handle) + (offset))
+#define DRM_READMEMORYBARRIER(map)	mb()
+#define DRM_WRITEMEMORYBARRIER(map)	wmb()
#define DRM_DEVICE	drm_file_t	*priv	= filp->private_data; \
			drm_device_t	*dev	= priv->dev
@@ -41,7 +42,7 @@
/* malloc/free without the overhead of DRM(alloc) */
#define DRM_MALLOC(x) kmalloc(x, GFP_KERNEL)
-#define DRM_FREE(x) kfree(x)
+#define DRM_FREE(x,size) kfree(x)
#define DRM_GETSAREA()	do {	\
do {				\
......
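The register-access macros now take a drm_local_map_t plus a byte offset instead of a raw kernel virtual address, so OS-independent driver code never does the map->handle arithmetic itself. A minimal sketch of a call site under the new scheme (the helper name and register offset are illustrative, not from this commit):

    /* Hypothetical helper in a driver's *_irq.c: read and write back an
     * interrupt status register through the new map + offset macros.  The
     * map typically comes from the driver's private state (e.g. its MMIO
     * map looked up at init time).
     */
    static void example_ack_status(drm_local_map_t *mmio, unsigned long reg)
    {
            unsigned int status = DRM_READ32(mmio, reg);   /* readl(handle + offset)      */

            DRM_WRITE32(mmio, reg, status);                /* writel(val, handle + offset) */
            DRM_READMEMORYBARRIER(mmio);                   /* still just mb() on Linux     */
    }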
@@ -147,10 +147,10 @@ static int DRM(name_info)(char *buf, char **start, off_t offset, int request,
	*eof = 0;
	if (dev->unique) {
-		DRM_PROC_PRINT("%s 0x%x %s\n",
-			       dev->name, dev->device, dev->unique);
+		DRM_PROC_PRINT("%s 0x%lx %s\n",
+			       dev->name, (long)dev->device, dev->unique);
	} else {
-		DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device);
+		DRM_PROC_PRINT("%s 0x%lx\n", dev->name, (long)dev->device);
	}
	if (len > request + offset) return request;
......
@@ -188,7 +188,7 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
-				  buf->idx, buf->pid);
+				  buf->idx, current->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
@@ -340,7 +340,8 @@ int gamma_dma_schedule(drm_device_t *dev, int locked)
	return retcode;
}
-static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
+static int gamma_dma_priority(struct file *filp,
+			      drm_device_t *dev, drm_dma_t *d)
{
	unsigned long	  address;
	unsigned long	  length;
@@ -378,15 +379,15 @@ static int gamma_dma_priority(struct file *filp,
			continue;
		}
		buf = dma->buflist[ idx ];
-		if (buf->pid != current->pid) {
-			DRM_ERROR("Process %d using buffer owned by %d\n",
-				  current->pid, buf->pid);
+		if (buf->filp != filp) {
+			DRM_ERROR("Process %d using buffer not owned\n",
+				  current->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
-			DRM_ERROR("Process %d using %d's buffer on list %d\n",
-				  current->pid, buf->pid, buf->list);
+			DRM_ERROR("Process %d using buffer on list %d\n",
+				  current->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
@@ -478,7 +479,8 @@ static int gamma_dma_priority(struct file *filp,
	return retcode;
}
-static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
+static int gamma_dma_send_buffers(struct file *filp,
+				  drm_device_t *dev, drm_dma_t *d)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_buf_t	  *last_buf = NULL;
@@ -490,7 +492,7 @@ static int gamma_dma_send_buffers(struct file *filp,
		add_wait_queue(&last_buf->dma_wait, &entry);
	}
-	if ((retcode = gamma_dma_enqueue(dev, d))) {
+	if ((retcode = gamma_dma_enqueue(filp, d))) {
		if (d->flags & _DRM_DMA_BLOCK)
			remove_wait_queue(&last_buf->dma_wait, &entry);
		return retcode;
@@ -520,14 +522,13 @@ static int gamma_dma_send_buffers(struct file *filp,
			}
		}
		if (retcode) {
-			DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
+			DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
-				  DRM_WAITCOUNT(dev, d->context),
+				  (long)DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
-				  last_buf->pid,
				  current->pid);
		}
	}
@@ -560,15 +561,15 @@ int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
	if (d.send_count) {
		if (d.flags & _DRM_DMA_PRIORITY)
-			retcode = gamma_dma_priority(dev, &d);
+			retcode = gamma_dma_priority(filp, dev, &d);
		else
-			retcode = gamma_dma_send_buffers(dev, &d);
+			retcode = gamma_dma_send_buffers(filp, dev, &d);
	}
	d.granted_count = 0;
	if (!retcode && d.request_count) {
-		retcode = gamma_dma_get_buffers(dev, &d);
+		retcode = gamma_dma_get_buffers(filp, &d);
	}
	DRM_DEBUG("%d returning, granted = %d\n",
@@ -590,7 +591,7 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
	drm_buf_t *buf;
	int i;
	struct list_head *list;
-	unsigned int *pgt;
+	unsigned long *pgt;
	DRM_DEBUG( "%s\n", __FUNCTION__ );
@@ -643,7 +644,7 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
	for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
		buf = dma->buflist[i];
-		*pgt = (unsigned int)buf->address + 0x07;
+		*pgt = (unsigned long)buf->address + 0x07;
		pgt++;
	}
......
@@ -42,16 +42,6 @@ typedef struct drm_gamma_private {
	drm_map_t *mmio3;
} drm_gamma_private_t;
-#define LOCK_TEST_WITH_RETURN( dev )					\
-do {									\
-	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||		\
-	     dev->lock.pid != current->pid ) {				\
-		DRM_ERROR( "%s called without lock held\n",		\
-			   __FUNCTION__ );				\
-		return -EINVAL;						\
-	}								\
-} while (0)
				/* gamma_dma.c */
extern int gamma_dma_init( struct inode *inode, struct file *filp,
			   unsigned int cmd, unsigned long arg );
......
@@ -86,7 +86,7 @@
 */
#define __HAVE_RELEASE		1
#define DRIVER_RELEASE() do {						\
-	i810_reclaim_buffers( dev, priv->pid );				\
+	i810_reclaim_buffers( filp );					\
} while (0)
				/* DMA customization:
......
@@ -46,30 +46,10 @@
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED   1
-#define RING_LOCALS	unsigned int outring, ringmask; volatile char *virt;
-
-#define BEGIN_LP_RING(n) do {						\
-	if (0) DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__);\
-	if (dev_priv->ring.space < n*4)					\
-		i810_wait_ring(dev, n*4);				\
-	dev_priv->ring.space -= n*4;					\
-	outring = dev_priv->ring.tail;					\
-	ringmask = dev_priv->ring.tail_mask;				\
-	virt = dev_priv->ring.virtual_start;				\
-} while (0)
-
-#define ADVANCE_LP_RING() do {						\
-	if (0) DRM_DEBUG("ADVANCE_LP_RING\n");				\
-	dev_priv->ring.tail = outring;					\
-	I810_WRITE(LP_RING + RING_TAIL, outring);			\
-} while(0)
-
-#define OUT_RING(n) do {						\
-	if (0) DRM_DEBUG("   OUT_RING %x\n", (int)(n));			\
-	*(volatile unsigned int *)(virt + outring) = n;			\
-	outring += 4;							\
-	outring &= ringmask;						\
-} while (0)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
+#define down_write down
+#define up_write up
+#endif
static inline void i810_print_status_page(drm_device_t *dev)
{
@@ -178,11 +158,7 @@ static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
	if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL;
-#if LINUX_VERSION_CODE <= 0x020402
-	down( &current->mm->mmap_sem );
-#else
	down_write( &current->mm->mmap_sem );
-#endif
	old_fops = filp->f_op;
	filp->f_op = &i810_buffer_fops;
	dev_priv->mmap_buffer = buf;
@@ -194,15 +170,12 @@ static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
	filp->f_op = old_fops;
	if ((unsigned long)buf_priv->virtual > -1024UL) {
		/* Real error */
-		DRM_DEBUG("mmap error\n");
+		DRM_ERROR("mmap error\n");
		retcode = (signed int)buf_priv->virtual;
		buf_priv->virtual = 0;
	}
-#if LINUX_VERSION_CODE <= 0x020402
-	up( &current->mm->mmap_sem );
-#else
	up_write( &current->mm->mmap_sem );
-#endif
	return retcode;
}
@@ -213,19 +186,13 @@ static int i810_unmap_buffer(drm_buf_t *buf)
	if(buf_priv->currently_mapped != I810_BUF_MAPPED)
		return -EINVAL;
-#if LINUX_VERSION_CODE <= 0x020402
-	down( &current->mm->mmap_sem );
-#else
-	down_write( &current->mm->mmap_sem );
-#endif
+	down_write(&current->mm->mmap_sem);
	retcode = do_munmap(current->mm,
			    (unsigned long)buf_priv->virtual,
			    (size_t) buf->total);
-#if LINUX_VERSION_CODE <= 0x020402
-	up( &current->mm->mmap_sem );
-#else
-	up_write( &current->mm->mmap_sem );
-#endif
+	up_write(&current->mm->mmap_sem);
	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
	buf_priv->virtual = 0;
static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d, static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
struct file *filp) struct file *filp)
{ {
drm_file_t *priv = filp->private_data;
drm_buf_t *buf; drm_buf_t *buf;
drm_i810_buf_priv_t *buf_priv; drm_i810_buf_priv_t *buf_priv;
int retcode = 0; int retcode = 0;
...@@ -250,10 +216,10 @@ static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d, ...@@ -250,10 +216,10 @@ static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
retcode = i810_map_buffer(buf, filp); retcode = i810_map_buffer(buf, filp);
if(retcode) { if(retcode) {
i810_freelist_put(dev, buf); i810_freelist_put(dev, buf);
DRM_DEBUG("mapbuf failed, retcode %d\n", retcode); DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
return retcode; return retcode;
} }
buf->pid = priv->pid; buf->filp = filp;
buf_priv = buf->dev_private; buf_priv = buf->dev_private;
d->granted = 1; d->granted = 1;
d->request_idx = buf->idx; d->request_idx = buf->idx;
...@@ -314,7 +280,7 @@ static int i810_wait_ring(drm_device_t *dev, int n) ...@@ -314,7 +280,7 @@ static int i810_wait_ring(drm_device_t *dev, int n)
end = jiffies + (HZ*3); end = jiffies + (HZ*3);
iters++; iters++;
if((signed)(end - jiffies) <= 0) { if(time_before(end, jiffies)) {
DRM_ERROR("space: %d wanted %d\n", ring->space, n); DRM_ERROR("space: %d wanted %d\n", ring->space, n);
DRM_ERROR("lockup\n"); DRM_ERROR("lockup\n");
goto out_wait_ring; goto out_wait_ring;
@@ -882,8 +848,10 @@ static int i810_flush_queue(drm_device_t *dev)
}
/* Must be called with the lock held */
-void i810_reclaim_buffers(drm_device_t *dev, pid_t pid)
+void i810_reclaim_buffers(struct file *filp)
{
+	drm_file_t    *priv = filp->private_data;
+	drm_device_t  *dev  = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	int		 i;
@@ -897,7 +865,7 @@ void i810_reclaim_buffers(struct file *filp)
		drm_buf_t *buf = dma->buflist[ i ];
	   	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-		if (buf->pid == pid && buf_priv) {
+		if (buf->filp == filp && buf_priv) {
			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
					   I810_BUF_FREE);
......
@@ -88,7 +88,7 @@ extern int i810_dma_init(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int i810_flush_ioctl(struct inode *inode, struct file *filp,
			    unsigned int cmd, unsigned long arg);
-extern void i810_reclaim_buffers(drm_device_t *dev, pid_t pid);
+extern void i810_reclaim_buffers(struct file *filp);
extern int i810_getage(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
@@ -136,6 +136,33 @@ int i810_clear_bufs(struct inode *inode, struct file *filp,
#define I810_READ16(reg)	I810_DEREF16(reg)
#define I810_WRITE16(reg,val)	do { I810_DEREF16(reg) = val; } while (0)
+#define I810_VERBOSE 0
+#define RING_LOCALS	unsigned int outring, ringmask;			\
+			volatile char *virt;
+#define BEGIN_LP_RING(n) do {						\
+	if (I810_VERBOSE)						\
+		DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__);\
+	if (dev_priv->ring.space < n*4)					\
+		i810_wait_ring(dev, n*4);				\
+	dev_priv->ring.space -= n*4;					\
+	outring = dev_priv->ring.tail;					\
+	ringmask = dev_priv->ring.tail_mask;				\
+	virt = dev_priv->ring.virtual_start;				\
+} while (0)
+#define ADVANCE_LP_RING() do {						\
+	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");		\
+	dev_priv->ring.tail = outring;					\
+	I810_WRITE(LP_RING + RING_TAIL, outring);			\
+} while(0)
+#define OUT_RING(n) do {						\
+	if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
+	*(volatile unsigned int *)(virt + outring) = n;			\
+	outring += 4;							\
+	outring &= ringmask;						\
+} while (0)
#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
@@ -198,6 +225,7 @@ int i810_clear_bufs(struct inode *inode, struct file *filp,
#define CMD_OP_Z_BUFFER_INFO		((0x0<<29)|(0x16<<23))
#define CMD_OP_DESTBUFFER_INFO		((0x0<<29)|(0x15<<23))
+#define CMD_OP_FRONTBUFFER_INFO		((0x0<<29)|(0x14<<23))
#define BR00_BITBLT_CLIENT		0x40000000
#define BR00_OP_COLOR_BLT		0x10000000
......
@@ -45,22 +45,37 @@
#define DRIVER_NAME		"i830"
#define DRIVER_DESC		"Intel 830M"
-#define DRIVER_DATE		"20020828"
+#define DRIVER_DATE		"20021108"
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: ?
+ * 1.3: New irq emit/wait ioctls.
+ *      New pageflip ioctl.
+ *      New getparam ioctl.
+ *      State for texunits 3&4 in sarea.
+ *      New (alternative) layout for texture state.
+ */
#define DRIVER_MAJOR		1
-#define DRIVER_MINOR		2
-#define DRIVER_PATCHLEVEL	1
+#define DRIVER_MINOR		3
+#define DRIVER_PATCHLEVEL	2
#define DRIVER_IOCTLS							\
	[DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)]    = { i830_dma_init,    1, 1 }, \
	[DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)]  = { i830_dma_vertex,  1, 0 }, \
	[DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)]   = { i830_clear_bufs,  1, 0 }, \
	[DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)]   = { i830_flush_ioctl, 1, 0 }, \
	[DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)]  = { i830_getage,      1, 0 }, \
	[DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)]  = { i830_getbuf,      1, 0 }, \
	[DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)]    = { i830_swap_bufs,   1, 0 }, \
	[DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)]    = { i830_copybuf,     1, 0 }, \
-	[DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)]  = { i830_docopy,      1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)]  = { i830_docopy,      1, 0 }, \
+	[DRM_IOCTL_NR(DRM_IOCTL_I830_FLIP)]    = { i830_flip_bufs,   1, 0 }, \
+	[DRM_IOCTL_NR(DRM_IOCTL_I830_IRQ_EMIT)] = { i830_irq_emit,   1, 0 }, \
+	[DRM_IOCTL_NR(DRM_IOCTL_I830_IRQ_WAIT)] = { i830_irq_wait,   1, 0 }, \
+	[DRM_IOCTL_NR(DRM_IOCTL_I830_GETPARAM)] = { i830_getparam,   1, 0 }, \
+	[DRM_IOCTL_NR(DRM_IOCTL_I830_SETPARAM)] = { i830_setparam,   1, 0 }
#define __HAVE_COUNTERS		4
#define __HAVE_COUNTER6		_DRM_STAT_IRQ
@@ -72,7 +87,7 @@
 */
#define __HAVE_RELEASE		1
#define DRIVER_RELEASE() do {						\
-	i830_reclaim_buffers( dev, priv->pid );				\
+	i830_reclaim_buffers( filp );					\
} while (0)
				/* DMA customization:
@@ -87,10 +102,49 @@
	i830_dma_quiescent( dev );					\
} while (0)
-/* Don't need an irq any more.  The template code will make sure that
- * a noop stub is generated for compatibility.
- */
-#define __HAVE_DMA_IRQ		0
+/* Driver will work either way: IRQ's save cpu time when waiting for
+ * the card, but are subject to subtle interactions between bios,
+ * hardware and the driver.
+ */
+#define USE_IRQS 0
+
+#if USE_IRQS
+#define __HAVE_DMA_IRQ		1
+#define __HAVE_SHARED_IRQ	1
+
+#define DRIVER_PREINSTALL() do {					\
+	drm_i830_private_t *dev_priv =					\
+		(drm_i830_private_t *)dev->dev_private;			\
+									\
+	I830_WRITE16( I830REG_HWSTAM, 0xffff );				\
+	I830_WRITE16( I830REG_INT_MASK_R, 0x0 );			\
+	I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 );			\
+} while (0)
+
+#define DRIVER_POSTINSTALL() do {					\
+	drm_i830_private_t *dev_priv =					\
+		(drm_i830_private_t *)dev->dev_private;			\
+	I830_WRITE16( I830REG_INT_ENABLE_R, 0x2 );			\
+	atomic_set(&dev_priv->irq_received, 0);				\
+	atomic_set(&dev_priv->irq_emitted, 0);				\
+	init_waitqueue_head(&dev_priv->irq_queue);			\
+} while (0)
+
+/* This gets called too late to be useful: dev_priv has already been
+ * freed.
+ */
+#define DRIVER_UNINSTALL() do {						\
+} while (0)
+
+#else
+#define __HAVE_DMA_IRQ		0
+#endif
				/* Buffer customization:
				 */
......
@@ -37,6 +37,7 @@
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h>	/* For task queue support */
+#include <linux/pagemap.h>	/* For FASTCALL on unlock_page() */
#include <linux/delay.h>
#define I830_BUF_FREE		2
@@ -46,8 +47,6 @@
#define I830_BUF_UNMAPPED 0
#define I830_BUF_MAPPED   1
-#define RING_LOCALS	unsigned int outring, ringmask; volatile char *virt;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
#define down_write down
#define up_write up
@@ -60,32 +59,6 @@
#define UnlockPage(page)	unlock_page(page)
#endif
-#define I830_VERBOSE 0
-
-#define BEGIN_LP_RING(n) do {					\
-	if (I830_VERBOSE)					\
-		printk("BEGIN_LP_RING(%d) in %s\n",		\
-			  n, __FUNCTION__);			\
-	if (dev_priv->ring.space < n*4)				\
-		i830_wait_ring(dev, n*4);			\
-	dev_priv->ring.space -= n*4;				\
-	outring = dev_priv->ring.tail;				\
-	ringmask = dev_priv->ring.tail_mask;			\
-	virt = dev_priv->ring.virtual_start;			\
-} while (0)
-
-#define ADVANCE_LP_RING() do {					\
-	if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring);	\
-	dev_priv->ring.tail = outring;				\
-	I830_WRITE(LP_RING + RING_TAIL, outring);		\
-} while(0)
-
-#define OUT_RING(n) do {					\
-	if (I830_VERBOSE) printk("   OUT_RING %x\n", (int)(n));	\
-	*(volatile unsigned int *)(virt + outring) = n;		\
-	outring += 4;						\
-	outring &= ringmask;					\
-} while (0)
static inline void i830_print_status_page(drm_device_t *dev)
{
@@ -237,7 +210,6 @@ static int i830_unmap_buffer(drm_buf_t *buf)
static int i830_dma_get_buffer(drm_device_t *dev, drm_i830_dma_t *d,
			       struct file *filp)
{
-	drm_file_t	  *priv	= filp->private_data;
	drm_buf_t	  *buf;
	drm_i830_buf_priv_t *buf_priv;
	int retcode = 0;
@@ -245,7 +217,7 @@ static int i830_dma_get_buffer(drm_device_t *dev, drm_i830_dma_t *d,
	buf = i830_freelist_get(dev);
	if (!buf) {
		retcode = -ENOMEM;
-		DRM_ERROR("retcode=%d\n", retcode);
+		DRM_DEBUG("retcode=%d\n", retcode);
		return retcode;
	}
@@ -255,7 +227,7 @@ static int i830_dma_get_buffer(drm_device_t *dev, drm_i830_dma_t *d,
		DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
		return retcode;
	}
-	buf->pid = priv->pid;
+	buf->filp = filp;
	buf_priv = buf->dev_private;
	d->granted = 1;
	d->request_idx = buf->idx;
...@@ -279,12 +251,21 @@ static int i830_dma_cleanup(drm_device_t *dev) ...@@ -279,12 +251,21 @@ static int i830_dma_cleanup(drm_device_t *dev)
dev_priv->ring.Size); dev_priv->ring.Size);
} }
if(dev_priv->hw_status_page != 0UL) { if(dev_priv->hw_status_page != 0UL) {
pci_free_consistent(dev->pdev, PAGE_SIZE, pci_free_consistent(dev->pdev, PAGE_SIZE,
(void *)dev_priv->hw_status_page, (void *)dev_priv->hw_status_page,
dev_priv->dma_status_page); dev_priv->dma_status_page);
/* Need to rewrite hardware status page */ /* Need to rewrite hardware status page */
I830_WRITE(0x02080, 0x1ffff000); I830_WRITE(0x02080, 0x1ffff000);
} }
/* Disable interrupts here because after dev_private
* is freed, it's too late.
*/
if (dev->irq) {
I830_WRITE16( I830REG_INT_MASK_R, 0xffff );
I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 );
}
DRM(free)(dev->dev_private, sizeof(drm_i830_private_t), DRM(free)(dev->dev_private, sizeof(drm_i830_private_t),
DRM_MEM_DRIVER); DRM_MEM_DRIVER);
dev->dev_private = NULL; dev->dev_private = NULL;
...@@ -298,7 +279,7 @@ static int i830_dma_cleanup(drm_device_t *dev) ...@@ -298,7 +279,7 @@ static int i830_dma_cleanup(drm_device_t *dev)
return 0; return 0;
} }
static int i830_wait_ring(drm_device_t *dev, int n) int i830_wait_ring(drm_device_t *dev, int n, const char *caller)
{ {
drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_ring_buffer_t *ring = &(dev_priv->ring); drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
...@@ -324,6 +305,7 @@ static int i830_wait_ring(drm_device_t *dev, int n) ...@@ -324,6 +305,7 @@ static int i830_wait_ring(drm_device_t *dev, int n)
goto out_wait_ring; goto out_wait_ring;
} }
udelay(1); udelay(1);
dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
} }
out_wait_ring: out_wait_ring:
...@@ -339,6 +321,9 @@ static void i830_kernel_lost_context(drm_device_t *dev) ...@@ -339,6 +321,9 @@ static void i830_kernel_lost_context(drm_device_t *dev)
ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail+8); ring->space = ring->head - (ring->tail+8);
if (ring->space < 0) ring->space += ring->Size; if (ring->space < 0) ring->space += ring->Size;
if (ring->head == ring->tail)
dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
} }
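The space computation treats the ring as a circular byte buffer with an 8-byte guard, presumably so that head == tail can only mean "empty". A made-up worked example: with ring->Size = 0x10000, head = 0x1000 and tail = 0x2000, space = 0x1000 - 0x2008 = -0x1008, which wraps to 0xeff8 bytes free after adding Size.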
static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv) static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
...@@ -453,6 +438,8 @@ static int i830_dma_initialize(drm_device_t *dev, ...@@ -453,6 +438,8 @@ static int i830_dma_initialize(drm_device_t *dev,
dev_priv->back_pitch = init->back_pitch; dev_priv->back_pitch = init->back_pitch;
dev_priv->depth_pitch = init->depth_pitch; dev_priv->depth_pitch = init->depth_pitch;
dev_priv->do_boxes = 0;
dev_priv->use_mi_batchbuffer_start = 0;
/* Program Hardware Status Page */ /* Program Hardware Status Page */
dev_priv->hw_status_page = dev_priv->hw_status_page =
...@@ -467,7 +454,7 @@ static int i830_dma_initialize(drm_device_t *dev, ...@@ -467,7 +454,7 @@ static int i830_dma_initialize(drm_device_t *dev,
memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);
I830_WRITE(0x02080, dev_priv->dma_status_page); I830_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page));
DRM_DEBUG("Enabled hardware status page\n"); DRM_DEBUG("Enabled hardware status page\n");
/* Now we need to init our freelist */ /* Now we need to init our freelist */
...@@ -528,11 +515,7 @@ static void i830EmitContextVerified( drm_device_t *dev, ...@@ -528,11 +515,7 @@ static void i830EmitContextVerified( drm_device_t *dev,
unsigned int tmp; unsigned int tmp;
RING_LOCALS; RING_LOCALS;
BEGIN_LP_RING( I830_CTX_SETUP_SIZE + 2 ); BEGIN_LP_RING( I830_CTX_SETUP_SIZE + 4 );
OUT_RING( GFX_OP_STIPPLE );
OUT_RING( 0 );
for ( i = 0 ; i < I830_CTXREG_BLENDCOLR0 ; i++ ) { for ( i = 0 ; i < I830_CTXREG_BLENDCOLR0 ; i++ ) {
tmp = code[i]; tmp = code[i];
...@@ -570,38 +553,44 @@ static void i830EmitContextVerified( drm_device_t *dev, ...@@ -570,38 +553,44 @@ static void i830EmitContextVerified( drm_device_t *dev,
ADVANCE_LP_RING(); ADVANCE_LP_RING();
} }
static void i830EmitTexVerified( drm_device_t *dev, static void i830EmitTexVerified( drm_device_t *dev, unsigned int *code )
volatile unsigned int *code )
{ {
drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_private_t *dev_priv = dev->dev_private;
int i, j = 0; int i, j = 0;
unsigned int tmp; unsigned int tmp;
RING_LOCALS; RING_LOCALS;
BEGIN_LP_RING( I830_TEX_SETUP_SIZE ); if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
(code[I830_TEXREG_MI0] & ~(0xf*LOAD_TEXTURE_MAP0)) ==
(STATE3D_LOAD_STATE_IMMEDIATE_2|4)) {
OUT_RING( GFX_OP_MAP_INFO ); BEGIN_LP_RING( I830_TEX_SETUP_SIZE );
OUT_RING( code[I830_TEXREG_MI1] );
OUT_RING( code[I830_TEXREG_MI2] );
OUT_RING( code[I830_TEXREG_MI3] );
OUT_RING( code[I830_TEXREG_MI4] );
OUT_RING( code[I830_TEXREG_MI5] );
for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) { OUT_RING( code[I830_TEXREG_MI0] ); /* TM0LI */
tmp = code[i]; OUT_RING( code[I830_TEXREG_MI1] ); /* TM0S0 */
OUT_RING( tmp ); OUT_RING( code[I830_TEXREG_MI2] ); /* TM0S1 */
j++; OUT_RING( code[I830_TEXREG_MI3] ); /* TM0S2 */
} OUT_RING( code[I830_TEXREG_MI4] ); /* TM0S3 */
OUT_RING( code[I830_TEXREG_MI5] ); /* TM0S4 */
for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) {
tmp = code[i];
OUT_RING( tmp );
j++;
}
if (j & 1) if (j & 1)
OUT_RING( 0 ); OUT_RING( 0 );
ADVANCE_LP_RING(); ADVANCE_LP_RING();
}
else
printk("rejected packet %x\n", code[0]);
} }
static void i830EmitTexBlendVerified( drm_device_t *dev, static void i830EmitTexBlendVerified( drm_device_t *dev,
volatile unsigned int *code, unsigned int *code,
volatile unsigned int num) unsigned int num)
{ {
drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_private_t *dev_priv = dev->dev_private;
int i, j = 0; int i, j = 0;
...@@ -611,7 +600,7 @@ static void i830EmitTexBlendVerified( drm_device_t *dev, ...@@ -611,7 +600,7 @@ static void i830EmitTexBlendVerified( drm_device_t *dev,
if (!num) if (!num)
return; return;
BEGIN_LP_RING( num ); BEGIN_LP_RING( num + 1 );
for ( i = 0 ; i < num ; i++ ) { for ( i = 0 ; i < num ; i++ ) {
tmp = code[i]; tmp = code[i];
...@@ -634,6 +623,8 @@ static void i830EmitTexPalette( drm_device_t *dev, ...@@ -634,6 +623,8 @@ static void i830EmitTexPalette( drm_device_t *dev,
int i; int i;
RING_LOCALS; RING_LOCALS;
return;
BEGIN_LP_RING( 258 ); BEGIN_LP_RING( 258 );
if(is_shared == 1) { if(is_shared == 1) {
...@@ -647,42 +638,41 @@ static void i830EmitTexPalette( drm_device_t *dev, ...@@ -647,42 +638,41 @@ static void i830EmitTexPalette( drm_device_t *dev,
OUT_RING(palette[i]); OUT_RING(palette[i]);
} }
OUT_RING(0); OUT_RING(0);
/* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
*/
} }
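Given the KW comment, the palette path is dead twice over after this commit: the new early return skips the body, and even without it nothing ever moves the tail pointer. For reference only, a sketch of what the end of the function would presumably need in order to reach the card (not part of this commit):

	OUT_RING( 0 );
	ADVANCE_LP_RING();	/* missing in the original: the tail is never
				 * advanced, so the packet never reaches the
				 * hardware */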
/* Need to do some additional checking when setting the dest buffer. /* Need to do some additional checking when setting the dest buffer.
*/ */
static void i830EmitDestVerified( drm_device_t *dev, static void i830EmitDestVerified( drm_device_t *dev,
volatile unsigned int *code ) unsigned int *code )
{ {
drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_private_t *dev_priv = dev->dev_private;
unsigned int tmp; unsigned int tmp;
RING_LOCALS; RING_LOCALS;
BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 6 ); BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 10 );
tmp = code[I830_DESTREG_CBUFADDR]; tmp = code[I830_DESTREG_CBUFADDR];
if (tmp == dev_priv->front_di1) { if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
/* Don't use fence when front buffer rendering */ if (((int)outring) & 8) {
OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING(0);
OUT_RING( BUF_3D_ID_COLOR_BACK | OUT_RING(0);
BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) ); }
OUT_RING( tmp );
OUT_RING( CMD_OP_DESTBUFFER_INFO );
OUT_RING( BUF_3D_ID_DEPTH |
BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
OUT_RING( dev_priv->zi1 );
} else if(tmp == dev_priv->back_di1) {
OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( CMD_OP_DESTBUFFER_INFO );
OUT_RING( BUF_3D_ID_COLOR_BACK | OUT_RING( BUF_3D_ID_COLOR_BACK |
BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) | BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
BUF_3D_USE_FENCE); BUF_3D_USE_FENCE);
OUT_RING( tmp ); OUT_RING( tmp );
OUT_RING( 0 );
OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( CMD_OP_DESTBUFFER_INFO );
OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE | OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
OUT_RING( dev_priv->zi1 ); OUT_RING( dev_priv->zi1 );
OUT_RING( 0 );
} else { } else {
DRM_ERROR("bad di1 %x (allow %x or %x)\n", DRM_ERROR("bad di1 %x (allow %x or %x)\n",
tmp, dev_priv->front_di1, dev_priv->back_di1); tmp, dev_priv->front_di1, dev_priv->back_di1);
...@@ -710,21 +700,35 @@ static void i830EmitDestVerified( drm_device_t *dev, ...@@ -710,21 +700,35 @@ static void i830EmitDestVerified( drm_device_t *dev,
OUT_RING( 0 ); OUT_RING( 0 );
} }
OUT_RING( code[I830_DESTREG_SENABLE] );
OUT_RING( GFX_OP_SCISSOR_RECT ); OUT_RING( GFX_OP_SCISSOR_RECT );
OUT_RING( code[I830_DESTREG_SR1] ); OUT_RING( code[I830_DESTREG_SR1] );
OUT_RING( code[I830_DESTREG_SR2] ); OUT_RING( code[I830_DESTREG_SR2] );
OUT_RING( 0 );
ADVANCE_LP_RING(); ADVANCE_LP_RING();
} }
static void i830EmitStippleVerified( drm_device_t *dev,
unsigned int *code )
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
BEGIN_LP_RING( 2 );
OUT_RING( GFX_OP_STIPPLE );
OUT_RING( code[1] );
ADVANCE_LP_RING();
}
static void i830EmitState( drm_device_t *dev ) static void i830EmitState( drm_device_t *dev )
{ {
drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty; unsigned int dirty = sarea_priv->dirty;
DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
if (dirty & I830_UPLOAD_BUFFERS) { if (dirty & I830_UPLOAD_BUFFERS) {
i830EmitDestVerified( dev, sarea_priv->BufferState ); i830EmitDestVerified( dev, sarea_priv->BufferState );
sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS; sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
...@@ -758,17 +762,154 @@ static void i830EmitState( drm_device_t *dev ) ...@@ -758,17 +762,154 @@ static void i830EmitState( drm_device_t *dev )
} }
if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) { if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
} else { } else {
if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
} }
if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
} }
/* 1.3:
*/
#if 0
if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
}
if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(3);
}
#endif
}
/* 1.3:
*/
if (dirty & I830_UPLOAD_STIPPLE) {
i830EmitStippleVerified( dev,
sarea_priv->StippleState);
sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
} }
if (dirty & I830_UPLOAD_TEX2) {
i830EmitTexVerified( dev, sarea_priv->TexState2 );
sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
}
if (dirty & I830_UPLOAD_TEX3) {
i830EmitTexVerified( dev, sarea_priv->TexState3 );
sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
}
if (dirty & I830_UPLOAD_TEXBLEND2) {
i830EmitTexBlendVerified(
dev,
sarea_priv->TexBlendState2,
sarea_priv->TexBlendStateWordsUsed2);
sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
}
if (dirty & I830_UPLOAD_TEXBLEND3) {
i830EmitTexBlendVerified(
dev,
sarea_priv->TexBlendState3,
sarea_priv->TexBlendStateWordsUsed3);
sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
}
}
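i830EmitState() is the kernel half of a dirty-flag protocol over the shared sarea: the client sets bits in the sarea's dirty word after touching the corresponding state block, and the next dispatch replays only those blocks and clears the bits. A hedged client-side sketch (the new value is made up; the state arrays and dirty word live in drm_i830_sarea_t):

	sarea->TexState2[I830_TEXREG_TM0S3] = new_tm0s3;	/* hypothetical value */
	sarea->dirty |= I830_UPLOAD_TEX2 | I830_UPLOAD_STIPPLE;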
/* ================================================================
* Performance monitoring functions
*/
static void i830_fill_box( drm_device_t *dev,
int x, int y, int w, int h,
int r, int g, int b )
{
drm_i830_private_t *dev_priv = dev->dev_private;
u32 color;
unsigned int BR13, CMD;
RING_LOCALS;
BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1<<24);
CMD = XY_COLOR_BLT_CMD;
x += dev_priv->sarea_priv->boxes[0].x1;
y += dev_priv->sarea_priv->boxes[0].y1;
if (dev_priv->cpp == 4) {
BR13 |= (1<<25);
CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
} else {
color = (((r & 0xf8) << 8) |
((g & 0xfc) << 3) |
((b & 0xf8) >> 3));
}
BEGIN_LP_RING( 6 );
OUT_RING( CMD );
OUT_RING( BR13 );
OUT_RING( (y << 16) | x );
OUT_RING( ((y+h) << 16) | (x+w) );
if ( dev_priv->current_page == 1 ) {
OUT_RING( dev_priv->front_offset );
} else {
OUT_RING( dev_priv->back_offset );
}
OUT_RING( color );
ADVANCE_LP_RING();
}
static void i830_cp_performance_boxes( drm_device_t *dev )
{
drm_i830_private_t *dev_priv = dev->dev_private;
/* Purple box for page flipping
*/
if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP )
i830_fill_box( dev, 4, 4, 8, 8, 255, 0, 255 );
/* Red box if we have to wait for idle at any point
*/
if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT )
i830_fill_box( dev, 16, 4, 8, 8, 255, 0, 0 );
/* Blue box: lost context?
*/
if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT )
i830_fill_box( dev, 28, 4, 8, 8, 0, 0, 255 );
/* Yellow box for texture swaps
*/
if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD )
i830_fill_box( dev, 40, 4, 8, 8, 255, 255, 0 );
/* Green box if hardware never idles (as far as we can tell)
*/
if ( !(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY) )
i830_fill_box( dev, 64, 4, 8, 8, 0, 255, 0 );
/* Draw bars indicating number of buffers allocated
* (not a great measure, easily confused)
*/
if (dev_priv->dma_used) {
int bar = dev_priv->dma_used / 10240;
if (bar > 100) bar = 100;
if (bar < 1) bar = 1;
i830_fill_box( dev, 4, 16, bar, 4, 196, 128, 128 );
dev_priv->dma_used = 0;
}
dev_priv->sarea_priv->perf_boxes = 0;
} }
static void i830_dma_dispatch_clear( drm_device_t *dev, int flags, static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
...@@ -786,6 +927,15 @@ static void i830_dma_dispatch_clear( drm_device_t *dev, int flags, ...@@ -786,6 +927,15 @@ static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
unsigned int BR13, CMD, D_CMD; unsigned int BR13, CMD, D_CMD;
RING_LOCALS; RING_LOCALS;
if ( dev_priv->current_page == 1 ) {
unsigned int tmp = flags;
flags &= ~(I830_FRONT | I830_BACK);
if ( tmp & I830_FRONT ) flags |= I830_BACK;
if ( tmp & I830_BACK ) flags |= I830_FRONT;
}
i830_kernel_lost_context(dev); i830_kernel_lost_context(dev);
switch(cpp) { switch(cpp) {
...@@ -865,13 +1015,17 @@ static void i830_dma_dispatch_swap( drm_device_t *dev ) ...@@ -865,13 +1015,17 @@ static void i830_dma_dispatch_swap( drm_device_t *dev )
drm_clip_rect_t *pbox = sarea_priv->boxes; drm_clip_rect_t *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch; int pitch = dev_priv->pitch;
int cpp = dev_priv->cpp; int cpp = dev_priv->cpp;
int ofs = dev_priv->back_offset;
int i; int i;
unsigned int CMD, BR13; unsigned int CMD, BR13;
RING_LOCALS; RING_LOCALS;
DRM_DEBUG("swapbuffers\n"); DRM_DEBUG("swapbuffers\n");
i830_kernel_lost_context(dev);
if (dev_priv->do_boxes)
i830_cp_performance_boxes( dev );
switch(cpp) { switch(cpp) {
case 2: case 2:
BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24); BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
...@@ -888,7 +1042,6 @@ static void i830_dma_dispatch_swap( drm_device_t *dev ) ...@@ -888,7 +1042,6 @@ static void i830_dma_dispatch_swap( drm_device_t *dev )
break; break;
} }
i830_kernel_lost_context(dev);
if (nbox > I830_NR_SAREA_CLIPRECTS) if (nbox > I830_NR_SAREA_CLIPRECTS)
nbox = I830_NR_SAREA_CLIPRECTS; nbox = I830_NR_SAREA_CLIPRECTS;
...@@ -908,23 +1061,72 @@ static void i830_dma_dispatch_swap( drm_device_t *dev ) ...@@ -908,23 +1061,72 @@ static void i830_dma_dispatch_swap( drm_device_t *dev )
BEGIN_LP_RING( 8 ); BEGIN_LP_RING( 8 );
OUT_RING( CMD ); OUT_RING( CMD );
OUT_RING( BR13 ); OUT_RING( BR13 );
OUT_RING( (pbox->y1 << 16) | pbox->x1 );
OUT_RING( (pbox->y2 << 16) | pbox->x2 );
OUT_RING( (pbox->y1 << 16) | if (dev_priv->current_page == 0)
pbox->x1 ); OUT_RING( dev_priv->front_offset );
OUT_RING( (pbox->y2 << 16) | else
pbox->x2 ); OUT_RING( dev_priv->back_offset );
OUT_RING( dev_priv->front_offset );
OUT_RING( (pbox->y1 << 16) |
pbox->x1 );
OUT_RING( (pbox->y1 << 16) | pbox->x1 );
OUT_RING( BR13 & 0xffff ); OUT_RING( BR13 & 0xffff );
OUT_RING( ofs );
if (dev_priv->current_page == 0)
OUT_RING( dev_priv->back_offset );
else
OUT_RING( dev_priv->front_offset );
ADVANCE_LP_RING(); ADVANCE_LP_RING();
} }
} }
static void i830_dma_dispatch_flip( drm_device_t *dev )
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
__FUNCTION__,
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
i830_kernel_lost_context(dev);
if (dev_priv->do_boxes) {
dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
i830_cp_performance_boxes( dev );
}
BEGIN_LP_RING( 2 );
OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
OUT_RING( 0 );
ADVANCE_LP_RING();
BEGIN_LP_RING( 6 );
OUT_RING( CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP );
OUT_RING( 0 );
if ( dev_priv->current_page == 0 ) {
OUT_RING( dev_priv->back_offset );
dev_priv->current_page = 1;
} else {
OUT_RING( dev_priv->front_offset );
dev_priv->current_page = 0;
}
OUT_RING(0);
ADVANCE_LP_RING();
BEGIN_LP_RING( 2 );
OUT_RING( MI_WAIT_FOR_EVENT |
MI_WAIT_FOR_PLANE_A_FLIP );
OUT_RING( 0 );
ADVANCE_LP_RING();
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
static void i830_dma_dispatch_vertex(drm_device_t *dev, static void i830_dma_dispatch_vertex(drm_device_t *dev,
drm_buf_t *buf, drm_buf_t *buf,
...@@ -977,8 +1179,10 @@ static void i830_dma_dispatch_vertex(drm_device_t *dev, ...@@ -977,8 +1179,10 @@ static void i830_dma_dispatch_vertex(drm_device_t *dev,
sarea_priv->vertex_prim | sarea_priv->vertex_prim |
((used/4)-2)); ((used/4)-2));
vp[used/4] = MI_BATCH_BUFFER_END; if (dev_priv->use_mi_batchbuffer_start) {
used += 4; vp[used/4] = MI_BATCH_BUFFER_END;
used += 4;
}
if (used & 4) { if (used & 4) {
vp[used/4] = 0; vp[used/4] = 0;
...@@ -1001,11 +1205,21 @@ static void i830_dma_dispatch_vertex(drm_device_t *dev, ...@@ -1001,11 +1205,21 @@ static void i830_dma_dispatch_vertex(drm_device_t *dev,
ADVANCE_LP_RING(); ADVANCE_LP_RING();
} }
BEGIN_LP_RING(2); if (dev_priv->use_mi_batchbuffer_start) {
OUT_RING( MI_BATCH_BUFFER_START | (2<<6) ); BEGIN_LP_RING(2);
OUT_RING( start | MI_BATCH_NON_SECURE ); OUT_RING( MI_BATCH_BUFFER_START | (2<<6) );
ADVANCE_LP_RING(); OUT_RING( start | MI_BATCH_NON_SECURE );
ADVANCE_LP_RING();
}
else {
BEGIN_LP_RING(4);
OUT_RING( MI_BATCH_BUFFER );
OUT_RING( start | MI_BATCH_NON_SECURE );
OUT_RING( start + used - 4 );
OUT_RING( 0 );
ADVANCE_LP_RING();
}
} while (++i < nbox); } while (++i < nbox);
} }
...@@ -1043,7 +1257,7 @@ void i830_dma_quiescent(drm_device_t *dev) ...@@ -1043,7 +1257,7 @@ void i830_dma_quiescent(drm_device_t *dev)
OUT_RING( 0 ); OUT_RING( 0 );
ADVANCE_LP_RING(); ADVANCE_LP_RING();
i830_wait_ring( dev, dev_priv->ring.Size - 8 ); i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ );
} }
static int i830_flush_queue(drm_device_t *dev) static int i830_flush_queue(drm_device_t *dev)
...@@ -1060,7 +1274,7 @@ static int i830_flush_queue(drm_device_t *dev) ...@@ -1060,7 +1274,7 @@ static int i830_flush_queue(drm_device_t *dev)
OUT_RING( 0 ); OUT_RING( 0 );
ADVANCE_LP_RING(); ADVANCE_LP_RING();
i830_wait_ring( dev, dev_priv->ring.Size - 8 ); i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ );
for (i = 0; i < dma->buf_count; i++) { for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[ i ]; drm_buf_t *buf = dma->buflist[ i ];
...@@ -1079,8 +1293,10 @@ static int i830_flush_queue(drm_device_t *dev) ...@@ -1079,8 +1293,10 @@ static int i830_flush_queue(drm_device_t *dev)
} }
/* Must be called with the lock held */ /* Must be called with the lock held */
void i830_reclaim_buffers(drm_device_t *dev, pid_t pid) void i830_reclaim_buffers( struct file *filp )
{ {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
int i; int i;
...@@ -1094,7 +1310,7 @@ void i830_reclaim_buffers(drm_device_t *dev, pid_t pid) ...@@ -1094,7 +1310,7 @@ void i830_reclaim_buffers(drm_device_t *dev, pid_t pid)
drm_buf_t *buf = dma->buflist[ i ]; drm_buf_t *buf = dma->buflist[ i ];
drm_i830_buf_priv_t *buf_priv = buf->dev_private; drm_i830_buf_priv_t *buf_priv = buf->dev_private;
if (buf->pid == pid && buf_priv) { if (buf->filp == filp && buf_priv) {
int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
I830_BUF_FREE); I830_BUF_FREE);
...@@ -1200,6 +1416,53 @@ int i830_swap_bufs(struct inode *inode, struct file *filp, ...@@ -1200,6 +1416,53 @@ int i830_swap_bufs(struct inode *inode, struct file *filp,
return 0; return 0;
} }
/* Not sure why this isn't set all the time:
*/
static void i830_do_init_pageflip( drm_device_t *dev )
{
drm_i830_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
int i830_do_cleanup_pageflip( drm_device_t *dev )
{
drm_i830_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
if (dev_priv->current_page != 0)
i830_dma_dispatch_flip( dev );
dev_priv->page_flipping = 0;
return 0;
}
int i830_flip_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i830_flip_buf called without lock held\n");
return -EINVAL;
}
if (!dev_priv->page_flipping)
i830_do_init_pageflip( dev );
i830_dma_dispatch_flip( dev );
return 0;
}
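From userspace the flip is an argument-less ioctl that must be issued while the hardware lock is held; a hedged sketch, with fd and lock acquisition assumed:

	if (ioctl(fd, DRM_IOCTL_I830_FLIP, 0) < 0)
		fprintf(stderr, "DRM_IOCTL_I830_FLIP failed\n");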
int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg) unsigned long arg)
{ {
...@@ -1263,3 +1526,66 @@ int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, ...@@ -1263,3 +1526,66 @@ int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
{ {
return 0; return 0;
} }
int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_getparam_t param;
int value;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
return -EINVAL;
}
if (copy_from_user(&param, (drm_i830_getparam_t *)arg, sizeof(param) ))
return -EFAULT;
switch( param.param ) {
case I830_PARAM_IRQ_ACTIVE:
value = dev->irq ? 1 : 0;
break;
default:
return -EINVAL;
}
if ( copy_to_user( param.value, &value, sizeof(int) ) ) {
DRM_ERROR( "copy_to_user\n" );
return -EFAULT;
}
return 0;
}
int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_setparam_t param;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
return -EINVAL;
}
if (copy_from_user(&param, (drm_i830_setparam_t *)arg, sizeof(param) ))
return -EFAULT;
switch( param.param ) {
case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
dev_priv->use_mi_batchbuffer_start = param.value;
break;
default:
return -EINVAL;
}
return 0;
}
...@@ -3,6 +3,9 @@ ...@@ -3,6 +3,9 @@
/* WARNING: These defines must be the same as what the Xserver uses. /* WARNING: These defines must be the same as what the Xserver uses.
* if you change them, you must change the defines in the Xserver. * if you change them, you must change the defines in the Xserver.
*
* KW: Actually, you can't ever change them because doing so would
* break backwards compatibility.
*/ */
#ifndef _I830_DEFINES_ #ifndef _I830_DEFINES_
...@@ -18,14 +21,12 @@ ...@@ -18,14 +21,12 @@
#define I830_NR_TEX_REGIONS 64 #define I830_NR_TEX_REGIONS 64
#define I830_LOG_MIN_TEX_REGION_SIZE 16 #define I830_LOG_MIN_TEX_REGION_SIZE 16
/* if defining I830_ENABLE_4_TEXTURES, do it in i830_3d_reg.h, too */ /* KW: These aren't correct but someone set them to two and then
#if !defined(I830_ENABLE_4_TEXTURES) * released the module. Now we can't change them as doing so would
* break backwards compatibility.
*/
#define I830_TEXTURE_COUNT 2 #define I830_TEXTURE_COUNT 2
#define I830_TEXBLEND_COUNT 2 /* always same as TEXTURE_COUNT? */ #define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
#else /* defined(I830_ENABLE_4_TEXTURES) */
#define I830_TEXTURE_COUNT 4
#define I830_TEXBLEND_COUNT 4 /* always same as TEXTURE_COUNT? */
#endif /* I830_ENABLE_4_TEXTURES */
#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */ #define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
...@@ -57,6 +58,7 @@ ...@@ -57,6 +58,7 @@
#define I830_UPLOAD_TEXBLEND_MASK 0xf00000 #define I830_UPLOAD_TEXBLEND_MASK 0xf00000
#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n)) #define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000 #define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
#define I830_UPLOAD_STIPPLE 0x8000000
/* Indices into buf.Setup where various bits of state are mirrored per /* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit, * context and per buffer. These can be fired at the card as a unit,
...@@ -73,7 +75,6 @@ ...@@ -73,7 +75,6 @@
*/ */
#define I830_DESTREG_CBUFADDR 0 #define I830_DESTREG_CBUFADDR 0
/* Invarient */
#define I830_DESTREG_DBUFADDR 1 #define I830_DESTREG_DBUFADDR 1
#define I830_DESTREG_DV0 2 #define I830_DESTREG_DV0 2
#define I830_DESTREG_DV1 3 #define I830_DESTREG_DV1 3
...@@ -109,6 +110,13 @@ ...@@ -109,6 +110,13 @@
#define I830_CTXREG_MCSB1 16 #define I830_CTXREG_MCSB1 16
#define I830_CTX_SETUP_SIZE 17 #define I830_CTX_SETUP_SIZE 17
/* 1.3: Stipple state
*/
#define I830_STPREG_ST0 0
#define I830_STPREG_ST1 1
#define I830_STP_SETUP_SIZE 2
/* Texture state (per tex unit) /* Texture state (per tex unit)
*/ */
...@@ -124,6 +132,18 @@ ...@@ -124,6 +132,18 @@
#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */ #define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
#define I830_TEX_SETUP_SIZE 10 #define I830_TEX_SETUP_SIZE 10
#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
#define I830_TEXREG_TM0S0 1
#define I830_TEXREG_TM0S1 2
#define I830_TEXREG_TM0S2 3
#define I830_TEXREG_TM0S3 4
#define I830_TEXREG_TM0S4 5
#define I830_TEXREG_NOP0 6 /* noop */
#define I830_TEXREG_NOP1 7 /* noop */
#define I830_TEXREG_NOP2 8 /* noop */
#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
#define __I830_TEX_SETUP_SIZE 10
#define I830_FRONT 0x1 #define I830_FRONT 0x1
#define I830_BACK 0x2 #define I830_BACK 0x2
#define I830_DEPTH 0x4 #define I830_DEPTH 0x4
...@@ -199,8 +219,35 @@ typedef struct _drm_i830_sarea { ...@@ -199,8 +219,35 @@ typedef struct _drm_i830_sarea {
int ctxOwner; /* last context to upload state */ int ctxOwner; /* last context to upload state */
int vertex_prim; int vertex_prim;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
/* Here's the state for texunits 2,3:
*/
unsigned int TexState2[I830_TEX_SETUP_SIZE];
unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
unsigned int TexBlendStateWordsUsed2;
unsigned int TexState3[I830_TEX_SETUP_SIZE];
unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
unsigned int TexBlendStateWordsUsed3;
unsigned int StippleState[I830_STP_SETUP_SIZE];
} drm_i830_sarea_t; } drm_i830_sarea_t;
/* Flags for perf_boxes
*/
#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
#define I830_BOX_FLIP 0x2 /* populated by kernel */
#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
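These bits form a one-shot protocol over the shared sarea: whoever notices an event ORs its bit in, and i830_cp_performance_boxes() draws the boxes at the next swap (when do_boxes is set) and zeroes the word. A hedged sketch of the one purely client-populated bit, with my_ctx_id made up:

	/* client side: flag a lost context for the next swap's overlay */
	if (sarea->ctxOwner != my_ctx_id)
		sarea->perf_boxes |= I830_BOX_LOST_CONTEXT;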
/* I830 specific ioctls /* I830 specific ioctls
* The device specific ioctl range is 0x40 to 0x79. * The device specific ioctl range is 0x40 to 0x79.
*/ */
...@@ -213,6 +260,11 @@ typedef struct _drm_i830_sarea { ...@@ -213,6 +260,11 @@ typedef struct _drm_i830_sarea {
#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46) #define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t) #define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48) #define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
#define DRM_IOCTL_I830_FLIP DRM_IO ( 0x49)
#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(0x4a, drm_i830_irq_emit_t)
#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( 0x4b, drm_i830_irq_wait_t)
#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(0x4c, drm_i830_getparam_t)
#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(0x4d, drm_i830_setparam_t)
typedef struct _drm_i830_clear { typedef struct _drm_i830_clear {
int clear_color; int clear_color;
...@@ -248,4 +300,36 @@ typedef struct drm_i830_dma { ...@@ -248,4 +300,36 @@ typedef struct drm_i830_dma {
int granted; int granted;
} drm_i830_dma_t; } drm_i830_dma_t;
/* 1.3: Userspace can request & wait on IRQs:
*/
typedef struct drm_i830_irq_emit {
int *irq_seq;
} drm_i830_irq_emit_t;
typedef struct drm_i830_irq_wait {
int irq_seq;
} drm_i830_irq_wait_t;
/* 1.3: New ioctl to query kernel params:
*/
#define I830_PARAM_IRQ_ACTIVE 1
typedef struct drm_i830_getparam {
int param;
int *value;
} drm_i830_getparam_t;
/* 1.3: New ioctl to set kernel params:
*/
#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
typedef struct drm_i830_setparam {
int param;
int value;
} drm_i830_setparam_t;
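I830_SETPARAM_USE_MI_BATCHBUFFER_START chooses between the two vertex dispatch paths in i830_dma.c (MI_BATCH_BUFFER_START versus a full MI_BATCH_BUFFER packet); the kernel default after init is 0. A hedged userspace sketch for turning it on, fd assumed:

	drm_i830_setparam_t sp;

	sp.param = I830_SETPARAM_USE_MI_BATCHBUFFER_START;
	sp.value = 1;
	ioctl(fd, DRM_IOCTL_I830_SETPARAM, &sp);	/* failure keeps the default */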
#endif /* _I830_DRM_H_ */ #endif /* _I830_DRM_H_ */
...@@ -78,6 +78,19 @@ typedef struct drm_i830_private { ...@@ -78,6 +78,19 @@ typedef struct drm_i830_private {
int back_pitch; int back_pitch;
int depth_pitch; int depth_pitch;
unsigned int cpp; unsigned int cpp;
int do_boxes;
int dma_used;
int current_page;
int page_flipping;
wait_queue_head_t irq_queue;
atomic_t irq_received;
atomic_t irq_emitted;
int use_mi_batchbuffer_start;
} drm_i830_private_t; } drm_i830_private_t;
/* i830_dma.c */ /* i830_dma.c */
...@@ -88,7 +101,7 @@ extern int i830_dma_init(struct inode *inode, struct file *filp, ...@@ -88,7 +101,7 @@ extern int i830_dma_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int i830_flush_ioctl(struct inode *inode, struct file *filp, extern int i830_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern void i830_reclaim_buffers(drm_device_t *dev, pid_t pid); extern void i830_reclaim_buffers(struct file *filp);
extern int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, extern int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg); unsigned long arg);
extern int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma); extern int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
...@@ -108,6 +121,23 @@ extern int i830_swap_bufs(struct inode *inode, struct file *filp, ...@@ -108,6 +121,23 @@ extern int i830_swap_bufs(struct inode *inode, struct file *filp,
extern int i830_clear_bufs(struct inode *inode, struct file *filp, extern int i830_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int i830_flip_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_getparam( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int i830_setparam( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
/* i830_irq.c */
extern int i830_irq_emit( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int i830_irq_wait( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int i830_wait_irq(drm_device_t *dev, int irq_nr);
extern int i830_emit_irq(drm_device_t *dev);
#define I830_BASE(reg) ((unsigned long) \ #define I830_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle) dev_priv->mmio_map->handle)
...@@ -119,12 +149,53 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp, ...@@ -119,12 +149,53 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp,
#define I830_READ16(reg) I830_DEREF16(reg) #define I830_READ16(reg) I830_DEREF16(reg)
#define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0) #define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0)
#define I830_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
printk("BEGIN_LP_RING(%d) in %s\n", \
n, __FUNCTION__); \
if (dev_priv->ring.space < n*4) \
i830_wait_ring(dev, n*4, __FUNCTION__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
ringmask = dev_priv->ring.tail_mask; \
virt = dev_priv->ring.virtual_start; \
} while (0)
#define OUT_RING(n) do { \
if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outcount++; \
outring += 4; \
outring &= ringmask; \
} while (0)
#define ADVANCE_LP_RING() do { \
if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I830_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
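The three macros are always used as a bracket around OUT_RING() calls, with RING_LOCALS declared at the top of the emitting function; note that with this commit ring.space is charged in ADVANCE_LP_RING() (via outcount) rather than in BEGIN_LP_RING(). A minimal sketch of the pattern, mirroring the flush sequence used in i830_dma_quiescent(); the helper name is made up:

	static void i830_emit_flush( drm_device_t *dev )
	{
		drm_i830_private_t *dev_priv = dev->dev_private;
		RING_LOCALS;

		BEGIN_LP_RING( 2 );		/* waits until 8 bytes are free */
		OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
		OUT_RING( 0 );			/* keep the tail qword-aligned */
		ADVANCE_LP_RING();		/* commit: update tail and space */
	}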
extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23) #define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) #define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
#define LOAD_TEXTURE_MAP0 (1<<11)
#define INST_PARSER_CLIENT 0x00000000 #define INST_PARSER_CLIENT 0x00000000
#define INST_OP_FLUSH 0x02000000 #define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001 #define INST_FLUSH_MAP_CACHE 0x00000001
...@@ -140,6 +211,9 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp, ...@@ -140,6 +211,9 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp,
#define I830REG_INT_MASK_R 0x020a8 #define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_ENABLE_R 0x020a0 #define I830REG_INT_ENABLE_R 0x020a0
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
#define LP_RING 0x2030 #define LP_RING 0x2030
#define HP_RING 0x2040 #define HP_RING 0x2040
#define RING_TAIL 0x00 #define RING_TAIL 0x00
...@@ -182,6 +256,9 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp, ...@@ -182,6 +256,9 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp,
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22)
#define CMD_3D (0x3<<29) #define CMD_3D (0x3<<29)
#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16)) #define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16)) #define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
...@@ -213,6 +290,11 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp, ...@@ -213,6 +290,11 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp,
#define MI_BATCH_BUFFER_END (0xA<<23) #define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE (1)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
#endif #endif
/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
*
* Copyright 2002 Tungsten Graphics, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Keith Whitwell <keith@tungstengraphics.com>
*
*/
#include "i830.h"
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
void DRM(dma_service)(int irq, void *device, struct pt_regs *regs)
{
drm_device_t *dev = (drm_device_t *)device;
drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
u16 temp;
temp = I830_READ16(I830REG_INT_IDENTITY_R);
printk("%s: %x\n", __FUNCTION__, temp);
if(temp == 0)
return;
I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
if (temp & 2) {
atomic_inc(&dev_priv->irq_received);
wake_up_interruptible(&dev_priv->irq_queue);
}
}
int i830_emit_irq(drm_device_t *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_emitted);
BEGIN_LP_RING(2);
OUT_RING( 0 );
OUT_RING( GFX_OP_USER_INTERRUPT );
ADVANCE_LP_RING();
return atomic_read(&dev_priv->irq_emitted);
}
int i830_wait_irq(drm_device_t *dev, int irq_nr)
{
drm_i830_private_t *dev_priv =
(drm_i830_private_t *)dev->dev_private;
DECLARE_WAITQUEUE(entry, current);
unsigned long end = jiffies + HZ*3;
int ret = 0;
DRM_DEBUG("%s\n", __FUNCTION__);
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
return 0;
dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
add_wait_queue(&dev_priv->irq_queue, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
break;
if((signed)(end - jiffies) <= 0) {
DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
I830_READ16( I830REG_INT_IDENTITY_R ),
I830_READ16( I830REG_INT_MASK_R ),
I830_READ16( I830REG_INT_ENABLE_R ),
I830_READ16( I830REG_HWSTAM ));
ret = -EBUSY; /* Lockup? Missed irq? */
break;
}
schedule_timeout(HZ*3);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev_priv->irq_queue, &entry);
return ret;
}
/* Needs the lock as it touches the ring.
*/
int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_emit_t emit;
int result;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i830_irq_emit called without lock held\n");
return -EINVAL;
}
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
return -EINVAL;
}
if (copy_from_user( &emit, (drm_i830_irq_emit_t *)arg, sizeof(emit) ))
return -EFAULT;
result = i830_emit_irq( dev );
if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) {
DRM_ERROR( "copy_to_user\n" );
return -EFAULT;
}
return 0;
}
/* Doesn't need the hardware lock.
*/
int i830_irq_wait( struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_wait_t irqwait;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
return -EINVAL;
}
if (copy_from_user( &irqwait, (drm_i830_irq_wait_t *)arg,
sizeof(irqwait) ))
return -EFAULT;
return i830_wait_irq( dev, irqwait.irq_seq );
}
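Taken together, the intended userspace pattern is emit-then-wait on the returned sequence number; a hedged sketch, with an open fd assumed and the hardware lock held around the emit as required above:

	drm_i830_irq_emit_t emit;
	drm_i830_irq_wait_t wait;
	int seq = 0;

	emit.irq_seq = &seq;
	ioctl(fd, DRM_IOCTL_I830_IRQ_EMIT, &emit);	/* needs the hw lock */

	wait.irq_seq = seq;
	ioctl(fd, DRM_IOCTL_I830_IRQ_WAIT, &wait);	/* lockless; -EBUSY after ~3s */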
...@@ -686,7 +686,7 @@ int mga_dma_flush( DRM_IOCTL_ARGS ) ...@@ -686,7 +686,7 @@ int mga_dma_flush( DRM_IOCTL_ARGS )
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_lock_t lock; drm_lock_t lock;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) ); DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
...@@ -720,7 +720,7 @@ int mga_dma_reset( DRM_IOCTL_ARGS ) ...@@ -720,7 +720,7 @@ int mga_dma_reset( DRM_IOCTL_ARGS )
DRM_DEVICE; DRM_DEVICE;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
return mga_do_dma_reset( dev_priv ); return mga_do_dma_reset( dev_priv );
} }
...@@ -730,7 +730,8 @@ int mga_dma_reset( DRM_IOCTL_ARGS ) ...@@ -730,7 +730,8 @@ int mga_dma_reset( DRM_IOCTL_ARGS )
* DMA buffer management * DMA buffer management
*/ */
static int mga_dma_get_buffers( drm_device_t *dev, drm_dma_t *d ) static int mga_dma_get_buffers( DRMFILE filp,
drm_device_t *dev, drm_dma_t *d )
{ {
drm_buf_t *buf; drm_buf_t *buf;
int i; int i;
...@@ -739,7 +740,7 @@ static int mga_dma_get_buffers( drm_device_t *dev, drm_dma_t *d ) ...@@ -739,7 +740,7 @@ static int mga_dma_get_buffers( drm_device_t *dev, drm_dma_t *d )
buf = mga_freelist_get( dev ); buf = mga_freelist_get( dev );
if ( !buf ) return DRM_ERR(EAGAIN); if ( !buf ) return DRM_ERR(EAGAIN);
buf->pid = DRM_CURRENTPID; buf->filp = filp;
if ( DRM_COPY_TO_USER( &d->request_indices[i], if ( DRM_COPY_TO_USER( &d->request_indices[i],
&buf->idx, sizeof(buf->idx) ) ) &buf->idx, sizeof(buf->idx) ) )
...@@ -761,7 +762,7 @@ int mga_dma_buffers( DRM_IOCTL_ARGS ) ...@@ -761,7 +762,7 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
drm_dma_t d; drm_dma_t d;
int ret = 0; int ret = 0;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) ); DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
...@@ -786,7 +787,7 @@ int mga_dma_buffers( DRM_IOCTL_ARGS ) ...@@ -786,7 +787,7 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
d.granted_count = 0; d.granted_count = 0;
if ( d.request_count ) { if ( d.request_count ) {
ret = mga_dma_get_buffers( dev, &d ); ret = mga_dma_get_buffers( filp, dev, &d );
} }
DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) ); DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
......
...@@ -90,14 +90,14 @@ typedef struct drm_mga_private { ...@@ -90,14 +90,14 @@ typedef struct drm_mga_private {
unsigned int texture_offset; unsigned int texture_offset;
unsigned int texture_size; unsigned int texture_size;
drm_map_t *sarea; drm_local_map_t *sarea;
drm_map_t *fb; drm_local_map_t *fb;
drm_map_t *mmio; drm_local_map_t *mmio;
drm_map_t *status; drm_local_map_t *status;
drm_map_t *warp; drm_local_map_t *warp;
drm_map_t *primary; drm_local_map_t *primary;
drm_map_t *buffers; drm_local_map_t *buffers;
drm_map_t *agp_textures; drm_local_map_t *agp_textures;
} drm_mga_private_t; } drm_mga_private_t;
/* mga_dma.c */ /* mga_dma.c */
...@@ -131,32 +131,30 @@ extern int mga_getparam( DRM_IOCTL_ARGS ); ...@@ -131,32 +131,30 @@ extern int mga_getparam( DRM_IOCTL_ARGS );
extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv ); extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv );
extern int mga_warp_init( drm_mga_private_t *dev_priv ); extern int mga_warp_init( drm_mga_private_t *dev_priv );
#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() #define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER(dev_priv->primary)
#if defined(__linux__) && defined(__alpha__)
#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle)) #define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle))
#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg) #define MGA_ADDR( reg ) (MGA_BASE(reg) + reg)
#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg ) #define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg )
#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg ) #define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg )
#ifdef __alpha__
#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg))) #define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg))) #define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0) #define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(dev_priv->mmio); MGA_DEREF( reg ) = val; } while (0)
#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0) #define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(dev_priv->mmio); MGA_DEREF8( reg ) = val; } while (0)
static inline u32 _MGA_READ(u32 *addr) static inline u32 _MGA_READ(u32 *addr)
{ {
DRM_READMEMORYBARRIER(); DRM_READMEMORYBARRIER(dev_priv->mmio);
return *(volatile u32 *)addr; return *(volatile u32 *)addr;
} }
#else #else
#define MGA_READ( reg ) MGA_DEREF( reg ) #define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg))
#define MGA_READ8( reg ) MGA_DEREF8( reg ) #define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg))
#define MGA_WRITE( reg, val ) do { MGA_DEREF( reg ) = val; } while (0) #define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val))
#define MGA_WRITE8( reg, val ) do { MGA_DEREF8( reg ) = val; } while (0) #define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif #endif
#define DWGREG0 0x1c00 #define DWGREG0 0x1c00
...@@ -186,16 +184,6 @@ do { \ ...@@ -186,16 +184,6 @@ do { \
} \ } \
} while (0) } while (0)
#define LOCK_TEST_WITH_RETURN( dev ) \
do { \
if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
dev->lock.pid != DRM_CURRENTPID ) { \
DRM_ERROR( "%s called without lock held\n", \
__FUNCTION__ ); \
return DRM_ERR(EINVAL); \
} \
} while (0)
#define WRAP_TEST_WITH_RETURN( dev_priv ) \ #define WRAP_TEST_WITH_RETURN( dev_priv ) \
do { \ do { \
if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
......
...@@ -887,7 +887,7 @@ int mga_dma_clear( DRM_IOCTL_ARGS ) ...@@ -887,7 +887,7 @@ int mga_dma_clear( DRM_IOCTL_ARGS )
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_clear_t clear; drm_mga_clear_t clear;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t *)data, sizeof(clear) ); DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t *)data, sizeof(clear) );
...@@ -911,7 +911,7 @@ int mga_dma_swap( DRM_IOCTL_ARGS ) ...@@ -911,7 +911,7 @@ int mga_dma_swap( DRM_IOCTL_ARGS )
drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS ) if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
...@@ -936,7 +936,7 @@ int mga_dma_vertex( DRM_IOCTL_ARGS ) ...@@ -936,7 +936,7 @@ int mga_dma_vertex( DRM_IOCTL_ARGS )
drm_mga_buf_priv_t *buf_priv; drm_mga_buf_priv_t *buf_priv;
drm_mga_vertex_t vertex; drm_mga_vertex_t vertex;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( vertex, DRM_COPY_FROM_USER_IOCTL( vertex,
(drm_mga_vertex_t *)data, (drm_mga_vertex_t *)data,
...@@ -975,7 +975,7 @@ int mga_dma_indices( DRM_IOCTL_ARGS ) ...@@ -975,7 +975,7 @@ int mga_dma_indices( DRM_IOCTL_ARGS )
drm_mga_buf_priv_t *buf_priv; drm_mga_buf_priv_t *buf_priv;
drm_mga_indices_t indices; drm_mga_indices_t indices;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( indices, DRM_COPY_FROM_USER_IOCTL( indices,
(drm_mga_indices_t *)data, (drm_mga_indices_t *)data,
...@@ -1015,7 +1015,7 @@ int mga_dma_iload( DRM_IOCTL_ARGS ) ...@@ -1015,7 +1015,7 @@ int mga_dma_iload( DRM_IOCTL_ARGS )
drm_mga_iload_t iload; drm_mga_iload_t iload;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t *)data, sizeof(iload) ); DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t *)data, sizeof(iload) );
...@@ -1055,7 +1055,7 @@ int mga_dma_blit( DRM_IOCTL_ARGS ) ...@@ -1055,7 +1055,7 @@ int mga_dma_blit( DRM_IOCTL_ARGS )
drm_mga_blit_t blit; drm_mga_blit_t blit;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t *)data, sizeof(blit) ); DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t *)data, sizeof(blit) );
......
...@@ -579,6 +579,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -579,6 +579,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
(dev_priv->ring.size / sizeof(u32)) - 1; (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = 128; dev_priv->ring.high_mark = 128;
dev_priv->ring.ring_rptr = dev_priv->ring_rptr;
dev_priv->sarea_priv->last_frame = 0; dev_priv->sarea_priv->last_frame = 0;
R128_WRITE( R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame ); R128_WRITE( R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame );
...@@ -663,7 +664,7 @@ int r128_cce_start( DRM_IOCTL_ARGS ) ...@@ -663,7 +664,7 @@ int r128_cce_start( DRM_IOCTL_ARGS )
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4 ) { if ( dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4 ) {
DRM_DEBUG( "%s while CCE running\n", __FUNCTION__ ); DRM_DEBUG( "%s while CCE running\n", __FUNCTION__ );
...@@ -686,7 +687,7 @@ int r128_cce_stop( DRM_IOCTL_ARGS ) ...@@ -686,7 +687,7 @@ int r128_cce_stop( DRM_IOCTL_ARGS )
int ret; int ret;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t *)data, sizeof(stop) ); DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t *)data, sizeof(stop) );
...@@ -725,7 +726,7 @@ int r128_cce_reset( DRM_IOCTL_ARGS ) ...@@ -725,7 +726,7 @@ int r128_cce_reset( DRM_IOCTL_ARGS )
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_DEBUG( "%s called before init done\n", __FUNCTION__ ); DRM_DEBUG( "%s called before init done\n", __FUNCTION__ );
...@@ -746,7 +747,7 @@ int r128_cce_idle( DRM_IOCTL_ARGS ) ...@@ -746,7 +747,7 @@ int r128_cce_idle( DRM_IOCTL_ARGS )
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( dev_priv->cce_running ) { if ( dev_priv->cce_running ) {
r128_do_cce_flush( dev_priv ); r128_do_cce_flush( dev_priv );
...@@ -760,7 +761,7 @@ int r128_engine_reset( DRM_IOCTL_ARGS ) ...@@ -760,7 +761,7 @@ int r128_engine_reset( DRM_IOCTL_ARGS )
DRM_DEVICE; DRM_DEVICE;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
return r128_do_engine_reset( dev ); return r128_do_engine_reset( dev );
} }
...@@ -807,7 +808,7 @@ int r128_fullscreen( DRM_IOCTL_ARGS ) ...@@ -807,7 +808,7 @@ int r128_fullscreen( DRM_IOCTL_ARGS )
DRM_DEVICE; DRM_DEVICE;
drm_r128_fullscreen_t fs; drm_r128_fullscreen_t fs;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( fs, (drm_r128_fullscreen_t *)data, sizeof(fs) ); DRM_COPY_FROM_USER_IOCTL( fs, (drm_r128_fullscreen_t *)data, sizeof(fs) );
...@@ -889,7 +890,7 @@ drm_buf_t *r128_freelist_get( drm_device_t *dev ) ...@@ -889,7 +890,7 @@ drm_buf_t *r128_freelist_get( drm_device_t *dev )
for ( i = 0 ; i < dma->buf_count ; i++ ) { for ( i = 0 ; i < dma->buf_count ; i++ ) {
buf = dma->buflist[i]; buf = dma->buflist[i];
buf_priv = buf->dev_private; buf_priv = buf->dev_private;
if ( buf->pid == 0 ) if ( buf->filp == 0 )
return buf; return buf;
} }
...@@ -948,7 +949,7 @@ int r128_wait_ring( drm_r128_private_t *dev_priv, int n ) ...@@ -948,7 +949,7 @@ int r128_wait_ring( drm_r128_private_t *dev_priv, int n )
return DRM_ERR(EBUSY); return DRM_ERR(EBUSY);
} }
static int r128_cce_get_buffers( drm_device_t *dev, drm_dma_t *d ) static int r128_cce_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
{ {
int i; int i;
drm_buf_t *buf; drm_buf_t *buf;
...@@ -957,7 +958,7 @@ static int r128_cce_get_buffers( drm_device_t *dev, drm_dma_t *d ) ...@@ -957,7 +958,7 @@ static int r128_cce_get_buffers( drm_device_t *dev, drm_dma_t *d )
buf = r128_freelist_get( dev ); buf = r128_freelist_get( dev );
if ( !buf ) return DRM_ERR(EAGAIN); if ( !buf ) return DRM_ERR(EAGAIN);
buf->pid = DRM_CURRENTPID; buf->filp = filp;
if ( DRM_COPY_TO_USER( &d->request_indices[i], &buf->idx, if ( DRM_COPY_TO_USER( &d->request_indices[i], &buf->idx,
sizeof(buf->idx) ) ) sizeof(buf->idx) ) )
...@@ -978,7 +979,7 @@ int r128_cce_buffers( DRM_IOCTL_ARGS ) ...@@ -978,7 +979,7 @@ int r128_cce_buffers( DRM_IOCTL_ARGS )
int ret = 0; int ret = 0;
drm_dma_t d; drm_dma_t d;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *) data, sizeof(d) ); DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *) data, sizeof(d) );
...@@ -1001,7 +1002,7 @@ int r128_cce_buffers( DRM_IOCTL_ARGS ) ...@@ -1001,7 +1002,7 @@ int r128_cce_buffers( DRM_IOCTL_ARGS )
d.granted_count = 0; d.granted_count = 0;
if ( d.request_count ) { if ( d.request_count ) {
ret = r128_cce_get_buffers( dev, &d ); ret = r128_cce_get_buffers( filp, dev, &d );
} }
DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d) ); DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d) );
......
...@@ -34,8 +34,8 @@ ...@@ -34,8 +34,8 @@
#ifndef __R128_DRV_H__ #ifndef __R128_DRV_H__
#define __R128_DRV_H__ #define __R128_DRV_H__
#define GET_RING_HEAD(ring) DRM_READ32( (volatile u32 *) (ring)->head ) #define GET_RING_HEAD(ring) DRM_READ32( (ring)->ring_rptr, 0 ) /* (ring)->head */
#define SET_RING_HEAD(ring,val) DRM_WRITE32( (volatile u32 *) (ring)->head, (val) ) #define SET_RING_HEAD(ring,val) DRM_WRITE32( (ring)->ring_rptr, 0, (val) ) /* (ring)->head */
typedef struct drm_r128_freelist { typedef struct drm_r128_freelist {
unsigned int age; unsigned int age;
...@@ -56,6 +56,7 @@ typedef struct drm_r128_ring_buffer { ...@@ -56,6 +56,7 @@ typedef struct drm_r128_ring_buffer {
int space; int space;
int high_mark; int high_mark;
drm_local_map_t *ring_rptr;
} drm_r128_ring_buffer_t; } drm_r128_ring_buffer_t;
typedef struct drm_r128_private { typedef struct drm_r128_private {
...@@ -98,13 +99,13 @@ typedef struct drm_r128_private { ...@@ -98,13 +99,13 @@ typedef struct drm_r128_private {
u32 depth_pitch_offset_c; u32 depth_pitch_offset_c;
u32 span_pitch_offset_c; u32 span_pitch_offset_c;
drm_map_t *sarea; drm_local_map_t *sarea;
drm_map_t *fb; drm_local_map_t *fb;
drm_map_t *mmio; drm_local_map_t *mmio;
drm_map_t *cce_ring; drm_local_map_t *cce_ring;
drm_map_t *ring_rptr; drm_local_map_t *ring_rptr;
drm_map_t *buffers; drm_local_map_t *buffers;
drm_map_t *agp_textures; drm_local_map_t *agp_textures;
} drm_r128_private_t; } drm_r128_private_t;
typedef struct drm_r128_buf_priv { typedef struct drm_r128_buf_priv {
...@@ -370,15 +371,10 @@ extern int r128_cce_indirect( DRM_IOCTL_ARGS ); ...@@ -370,15 +371,10 @@ extern int r128_cce_indirect( DRM_IOCTL_ARGS );
#define R128_PERFORMANCE_BOXES 0 #define R128_PERFORMANCE_BOXES 0
#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define R128_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) #define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define R128_ADDR(reg) (R128_BASE( reg ) + reg) #define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
#define R128_READ(reg) DRM_READ32( (volatile u32 *) R128_ADDR(reg) )
#define R128_WRITE(reg,val) DRM_WRITE32( (volatile u32 *) R128_ADDR(reg), (val) )
#define R128_READ8(reg) DRM_READ8( (volatile u8 *) R128_ADDR(reg) )
#define R128_WRITE8(reg,val) DRM_WRITE8( (volatile u8 *) R128_ADDR(reg), (val) )
#define R128_WRITE_PLL(addr,val) \ #define R128_WRITE_PLL(addr,val) \
do { \ do { \
...@@ -403,15 +399,6 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr); ...@@ -403,15 +399,6 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr);
* Misc helper macros * Misc helper macros
*/ */
#define LOCK_TEST_WITH_RETURN( dev ) \
do { \
if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
dev->lock.pid != DRM_CURRENTPID ) { \
DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); \
return DRM_ERR(EINVAL); \
} \
} while (0)
#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
do { \ do { \
drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
...@@ -453,7 +440,7 @@ do { \ ...@@ -453,7 +440,7 @@ do { \
#if defined(__powerpc__) #if defined(__powerpc__)
#define r128_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring ) #define r128_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring )
#else #else
#define r128_flush_write_combine() DRM_WRITEMEMORYBARRIER() #define r128_flush_write_combine() DRM_WRITEMEMORYBARRIER(dev_priv->ring_rptr)
#endif #endif
......
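The r128 header hunk drops the raw R128_ADDR pointer arithmetic in favour of passing the mapped region plus a byte offset to DRM_READ32/DRM_WRITE32, so the OS-independent layer decides how the map is actually accessed. A rough standalone illustration of the map-plus-offset style (plain memory instead of MMIO; struct and helper names invented for the sketch):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative stand-in for drm_local_map_t: just a handle to the mapping. */
    struct local_map {
        void *handle;
    };

    static uint32_t map_read32(struct local_map *map, size_t off)
    {
        uint32_t v;
        memcpy(&v, (char *)map->handle + off, sizeof(v));
        return v;
    }

    static void map_write32(struct local_map *map, size_t off, uint32_t val)
    {
        memcpy((char *)map->handle + off, &val, sizeof(val));
    }

    int main(void)
    {
        uint32_t regs[4] = { 0 };
        struct local_map mmio = { regs };

        map_write32(&mmio, 8, 0xdeadbeef);     /* like R128_WRITE(reg, val) */
        printf("0x%08x\n", (unsigned)map_read32(&mmio, 8));
        return 0;
    }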
...@@ -778,7 +778,8 @@ static void r128_cce_dispatch_indices( drm_device_t *dev, ...@@ -778,7 +778,8 @@ static void r128_cce_dispatch_indices( drm_device_t *dev,
sarea_priv->nbox = 0; sarea_priv->nbox = 0;
} }
static int r128_cce_dispatch_blit( drm_device_t *dev, static int r128_cce_dispatch_blit( DRMFILE filp,
drm_device_t *dev,
drm_r128_blit_t *blit ) drm_r128_blit_t *blit )
{ {
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
...@@ -829,9 +830,9 @@ static int r128_cce_dispatch_blit( drm_device_t *dev, ...@@ -829,9 +830,9 @@ static int r128_cce_dispatch_blit( drm_device_t *dev,
buf = dma->buflist[blit->idx]; buf = dma->buflist[blit->idx];
buf_priv = buf->dev_private; buf_priv = buf->dev_private;
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "process %d using buffer owned by %d\n", DRM_ERROR( "process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID, buf->filp );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
if ( buf->pending ) { if ( buf->pending ) {
...@@ -896,7 +897,7 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev, ...@@ -896,7 +897,7 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev,
int count, x, y; int count, x, y;
u32 *buffer; u32 *buffer;
u8 *mask; u8 *mask;
int i; int i, buffer_size, mask_size;
RING_LOCALS; RING_LOCALS;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
...@@ -908,25 +909,25 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev, ...@@ -908,25 +909,25 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev,
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
buffer = DRM_MALLOC( depth->n * sizeof(u32) ); buffer_size = depth->n * sizeof(u32);
buffer = DRM_MALLOC( buffer_size );
if ( buffer == NULL ) if ( buffer == NULL )
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
if ( DRM_COPY_FROM_USER( buffer, depth->buffer, if ( DRM_COPY_FROM_USER( buffer, depth->buffer, buffer_size ) ) {
depth->n * sizeof(u32) ) ) { DRM_FREE( buffer, buffer_size);
DRM_FREE( buffer );
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
mask_size = depth->n * sizeof(u8);
if ( depth->mask ) { if ( depth->mask ) {
mask = DRM_MALLOC( depth->n * sizeof(u8) ); mask = DRM_MALLOC( mask_size );
if ( mask == NULL ) { if ( mask == NULL ) {
DRM_FREE( buffer ); DRM_FREE( buffer, buffer_size );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
if ( DRM_COPY_FROM_USER( mask, depth->mask, if ( DRM_COPY_FROM_USER( mask, depth->mask, mask_size ) ) {
depth->n * sizeof(u8) ) ) { DRM_FREE( buffer, buffer_size );
DRM_FREE( buffer ); DRM_FREE( mask, mask_size );
DRM_FREE( mask );
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
...@@ -953,7 +954,7 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev, ...@@ -953,7 +954,7 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev,
} }
} }
DRM_FREE( mask ); DRM_FREE( mask, mask_size );
} else { } else {
for ( i = 0 ; i < count ; i++, x++ ) { for ( i = 0 ; i < count ; i++, x++ ) {
BEGIN_RING( 6 ); BEGIN_RING( 6 );
...@@ -977,7 +978,7 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev, ...@@ -977,7 +978,7 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev,
} }
} }
DRM_FREE( buffer ); DRM_FREE( buffer, buffer_size );
return 0; return 0;
} }
...@@ -989,60 +990,62 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev, ...@@ -989,60 +990,62 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
int count, *x, *y; int count, *x, *y;
u32 *buffer; u32 *buffer;
u8 *mask; u8 *mask;
int i; int i, xbuf_size, ybuf_size, buffer_size, mask_size;
RING_LOCALS; RING_LOCALS;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
count = depth->n; count = depth->n;
x = DRM_MALLOC( count * sizeof(*x) ); xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
x = DRM_MALLOC( xbuf_size );
if ( x == NULL ) { if ( x == NULL ) {
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
y = DRM_MALLOC( count * sizeof(*y) ); y = DRM_MALLOC( ybuf_size );
if ( y == NULL ) { if ( y == NULL ) {
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
if ( DRM_COPY_FROM_USER( x, depth->x, count * sizeof(int) ) ) { if ( DRM_COPY_FROM_USER( x, depth->x, xbuf_size ) ) {
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
DRM_FREE( y ); DRM_FREE( y, ybuf_size );
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
if ( DRM_COPY_FROM_USER( y, depth->y, count * sizeof(int) ) ) { if ( DRM_COPY_FROM_USER( y, depth->y, xbuf_size ) ) {
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
DRM_FREE( y ); DRM_FREE( y, ybuf_size );
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
buffer = DRM_MALLOC( depth->n * sizeof(u32) ); buffer_size = depth->n * sizeof(u32);
buffer = DRM_MALLOC( buffer_size );
if ( buffer == NULL ) { if ( buffer == NULL ) {
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
DRM_FREE( y ); DRM_FREE( y, ybuf_size );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
if ( DRM_COPY_FROM_USER( buffer, depth->buffer, if ( DRM_COPY_FROM_USER( buffer, depth->buffer, buffer_size ) ) {
depth->n * sizeof(u32) ) ) { DRM_FREE( x, xbuf_size );
DRM_FREE( x ); DRM_FREE( y, ybuf_size );
DRM_FREE( y ); DRM_FREE( buffer, buffer_size );
DRM_FREE( buffer );
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
if ( depth->mask ) { if ( depth->mask ) {
mask = DRM_MALLOC( depth->n * sizeof(u8) ); mask_size = depth->n * sizeof(u8);
mask = DRM_MALLOC( mask_size );
if ( mask == NULL ) { if ( mask == NULL ) {
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
DRM_FREE( y ); DRM_FREE( y, ybuf_size );
DRM_FREE( buffer ); DRM_FREE( buffer, buffer_size );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
if ( DRM_COPY_FROM_USER( mask, depth->mask, if ( DRM_COPY_FROM_USER( mask, depth->mask, mask_size ) ) {
depth->n * sizeof(u8) ) ) { DRM_FREE( x, xbuf_size );
DRM_FREE( x ); DRM_FREE( y, ybuf_size );
DRM_FREE( y ); DRM_FREE( buffer, buffer_size );
DRM_FREE( buffer ); DRM_FREE( mask, mask_size );
DRM_FREE( mask );
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
...@@ -1069,7 +1072,7 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev, ...@@ -1069,7 +1072,7 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
} }
} }
DRM_FREE( mask ); DRM_FREE( mask, mask_size );
} else { } else {
for ( i = 0 ; i < count ; i++ ) { for ( i = 0 ; i < count ; i++ ) {
BEGIN_RING( 6 ); BEGIN_RING( 6 );
...@@ -1093,9 +1096,9 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev, ...@@ -1093,9 +1096,9 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
} }
} }
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
DRM_FREE( y ); DRM_FREE( y, ybuf_size );
DRM_FREE( buffer ); DRM_FREE( buffer, buffer_size );
return 0; return 0;
} }
...@@ -1146,7 +1149,7 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev, ...@@ -1146,7 +1149,7 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
{ {
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
int count, *x, *y; int count, *x, *y;
int i; int i, xbuf_size, ybuf_size;
RING_LOCALS; RING_LOCALS;
DRM_DEBUG( "%s\n", __FUNCTION__ ); DRM_DEBUG( "%s\n", __FUNCTION__ );
...@@ -1155,23 +1158,25 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev, ...@@ -1155,23 +1158,25 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
count = dev_priv->depth_pitch; count = dev_priv->depth_pitch;
} }
x = DRM_MALLOC( count * sizeof(*x) ); xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
x = DRM_MALLOC( xbuf_size );
if ( x == NULL ) { if ( x == NULL ) {
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
y = DRM_MALLOC( count * sizeof(*y) ); y = DRM_MALLOC( ybuf_size );
if ( y == NULL ) { if ( y == NULL ) {
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
if ( DRM_COPY_FROM_USER( x, depth->x, count * sizeof(int) ) ) { if ( DRM_COPY_FROM_USER( x, depth->x, xbuf_size ) ) {
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
DRM_FREE( y ); DRM_FREE( y, ybuf_size );
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
if ( DRM_COPY_FROM_USER( y, depth->y, count * sizeof(int) ) ) { if ( DRM_COPY_FROM_USER( y, depth->y, ybuf_size ) ) {
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
DRM_FREE( y ); DRM_FREE( y, ybuf_size );
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
} }
...@@ -1199,8 +1204,8 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev, ...@@ -1199,8 +1204,8 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
ADVANCE_RING(); ADVANCE_RING();
} }
DRM_FREE( x ); DRM_FREE( x, xbuf_size );
DRM_FREE( y ); DRM_FREE( y, ybuf_size );
return 0; return 0;
} }
...@@ -1240,7 +1245,7 @@ int r128_cce_clear( DRM_IOCTL_ARGS ) ...@@ -1240,7 +1245,7 @@ int r128_cce_clear( DRM_IOCTL_ARGS )
drm_r128_clear_t clear; drm_r128_clear_t clear;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t *) data, DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t *) data,
sizeof(clear) ); sizeof(clear) );
...@@ -1266,7 +1271,7 @@ int r128_cce_swap( DRM_IOCTL_ARGS ) ...@@ -1266,7 +1271,7 @@ int r128_cce_swap( DRM_IOCTL_ARGS )
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
DRM_DEBUG( "%s\n", __FUNCTION__ ); DRM_DEBUG( "%s\n", __FUNCTION__ );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
RING_SPACE_TEST_WITH_RETURN( dev_priv ); RING_SPACE_TEST_WITH_RETURN( dev_priv );
...@@ -1293,7 +1298,7 @@ int r128_cce_vertex( DRM_IOCTL_ARGS ) ...@@ -1293,7 +1298,7 @@ int r128_cce_vertex( DRM_IOCTL_ARGS )
drm_r128_buf_priv_t *buf_priv; drm_r128_buf_priv_t *buf_priv;
drm_r128_vertex_t vertex; drm_r128_vertex_t vertex;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
...@@ -1324,9 +1329,9 @@ int r128_cce_vertex( DRM_IOCTL_ARGS ) ...@@ -1324,9 +1329,9 @@ int r128_cce_vertex( DRM_IOCTL_ARGS )
buf = dma->buflist[vertex.idx]; buf = dma->buflist[vertex.idx];
buf_priv = buf->dev_private; buf_priv = buf->dev_private;
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "process %d using buffer owned by %d\n", DRM_ERROR( "process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID, buf->filp );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
if ( buf->pending ) { if ( buf->pending ) {
...@@ -1353,7 +1358,7 @@ int r128_cce_indices( DRM_IOCTL_ARGS ) ...@@ -1353,7 +1358,7 @@ int r128_cce_indices( DRM_IOCTL_ARGS )
drm_r128_indices_t elts; drm_r128_indices_t elts;
int count; int count;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
...@@ -1383,9 +1388,9 @@ int r128_cce_indices( DRM_IOCTL_ARGS ) ...@@ -1383,9 +1388,9 @@ int r128_cce_indices( DRM_IOCTL_ARGS )
buf = dma->buflist[elts.idx]; buf = dma->buflist[elts.idx];
buf_priv = buf->dev_private; buf_priv = buf->dev_private;
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "process %d using buffer owned by %d\n", DRM_ERROR( "process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID, buf->filp );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
if ( buf->pending ) { if ( buf->pending ) {
...@@ -1421,7 +1426,7 @@ int r128_cce_blit( DRM_IOCTL_ARGS ) ...@@ -1421,7 +1426,7 @@ int r128_cce_blit( DRM_IOCTL_ARGS )
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_blit_t blit; drm_r128_blit_t blit;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t *) data, DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t *) data,
sizeof(blit) ); sizeof(blit) );
...@@ -1437,7 +1442,7 @@ int r128_cce_blit( DRM_IOCTL_ARGS ) ...@@ -1437,7 +1442,7 @@ int r128_cce_blit( DRM_IOCTL_ARGS )
RING_SPACE_TEST_WITH_RETURN( dev_priv ); RING_SPACE_TEST_WITH_RETURN( dev_priv );
VB_AGE_TEST_WITH_RETURN( dev_priv ); VB_AGE_TEST_WITH_RETURN( dev_priv );
return r128_cce_dispatch_blit( dev, &blit ); return r128_cce_dispatch_blit( filp, dev, &blit );
} }
int r128_cce_depth( DRM_IOCTL_ARGS ) int r128_cce_depth( DRM_IOCTL_ARGS )
...@@ -1446,7 +1451,7 @@ int r128_cce_depth( DRM_IOCTL_ARGS ) ...@@ -1446,7 +1451,7 @@ int r128_cce_depth( DRM_IOCTL_ARGS )
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_depth_t depth; drm_r128_depth_t depth;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t *) data, DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t *) data,
sizeof(depth) ); sizeof(depth) );
...@@ -1474,7 +1479,7 @@ int r128_cce_stipple( DRM_IOCTL_ARGS ) ...@@ -1474,7 +1479,7 @@ int r128_cce_stipple( DRM_IOCTL_ARGS )
drm_r128_stipple_t stipple; drm_r128_stipple_t stipple;
u32 mask[32]; u32 mask[32];
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t *) data, DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t *) data,
sizeof(stipple) ); sizeof(stipple) );
...@@ -1502,7 +1507,7 @@ int r128_cce_indirect( DRM_IOCTL_ARGS ) ...@@ -1502,7 +1507,7 @@ int r128_cce_indirect( DRM_IOCTL_ARGS )
RING_LOCALS; RING_LOCALS;
#endif #endif
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
...@@ -1525,9 +1530,9 @@ int r128_cce_indirect( DRM_IOCTL_ARGS ) ...@@ -1525,9 +1530,9 @@ int r128_cce_indirect( DRM_IOCTL_ARGS )
buf = dma->buflist[indirect.idx]; buf = dma->buflist[indirect.idx];
buf_priv = buf->dev_private; buf_priv = buf->dev_private;
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "process %d using buffer owned by %d\n", DRM_ERROR( "process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID, buf->filp );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
if ( buf->pending ) { if ( buf->pending ) {
......
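The depth span/pixel hunks now compute each allocation size once (buffer_size, mask_size, xbuf_size, ybuf_size) and hand it to DRM_FREE, and every early-error path frees exactly what has been allocated so far. A small sketch of that copy-in pattern with sized frees (user data simulated with local arrays; the sized_free helper is made up for the example, standing in for DRM_FREE(ptr, size)):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative sized-free wrapper, in the spirit of DRM_FREE(ptr, size). */
    static void sized_free(void *p, size_t size)
    {
        (void)size;                /* a real pool could account against 'size' */
        free(p);
    }

    /* Copy a span and its optional mask, freeing everything on any failure. */
    static int copy_in_span(const uint32_t *ubuf, const uint8_t *umask, int n)
    {
        size_t buffer_size = n * sizeof(uint32_t);
        size_t mask_size   = n * sizeof(uint8_t);

        uint32_t *buffer = malloc(buffer_size);
        if (!buffer)
            return -1;
        memcpy(buffer, ubuf, buffer_size);   /* stands in for DRM_COPY_FROM_USER */

        uint8_t *mask = NULL;
        if (umask) {
            mask = malloc(mask_size);
            if (!mask) {
                sized_free(buffer, buffer_size);
                return -1;
            }
            memcpy(mask, umask, mask_size);
        }

        /* ... dispatch the span here ... */

        if (mask)
            sized_free(mask, mask_size);
        sized_free(buffer, buffer_size);
        return 0;
    }

    int main(void)
    {
        uint32_t depth[4] = { 1, 2, 3, 4 };
        uint8_t  m[4]     = { 1, 1, 0, 1 };
        printf("%d\n", copy_in_span(depth, m, 4));
        return 0;
    }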
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
#define DRIVER_DATE "20020828" #define DRIVER_DATE "20020828"
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 7 #define DRIVER_MINOR 8
#define DRIVER_PATCHLEVEL 0 #define DRIVER_PATCHLEVEL 0
/* Interface history: /* Interface history:
...@@ -77,6 +77,7 @@ ...@@ -77,6 +77,7 @@
* and R200_PP_CUBIC_OFFSET_F1_[0..5]. * and R200_PP_CUBIC_OFFSET_F1_[0..5].
* Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
* R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian) * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
* 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
*/ */
#define DRIVER_IOCTLS \ #define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
...@@ -105,11 +106,6 @@ ...@@ -105,11 +106,6 @@
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 },
#define USE_IRQS 1
#if USE_IRQS
#define __HAVE_DMA_IRQ 1
#define __HAVE_VBL_IRQ 1
#define __HAVE_SHARED_IRQ 1
/* When a client dies: /* When a client dies:
* - Check for and clean up flipped page state * - Check for and clean up flipped page state
...@@ -117,35 +113,34 @@ ...@@ -117,35 +113,34 @@
* *
* DRM infrastructure takes care of reclaiming dma buffers. * DRM infrastructure takes care of reclaiming dma buffers.
*/ */
#define DRIVER_PRERELEASE() do { \ #define DRIVER_PRERELEASE() \
do { \
if ( dev->dev_private ) { \ if ( dev->dev_private ) { \
drm_radeon_private_t *dev_priv = dev->dev_private; \ drm_radeon_private_t *dev_priv = dev->dev_private; \
if ( dev_priv->page_flipping ) { \ if ( dev_priv->page_flipping ) { \
radeon_do_cleanup_pageflip( dev ); \ radeon_do_cleanup_pageflip( dev ); \
} \ } \
radeon_mem_release( dev_priv->agp_heap ); \ radeon_mem_release( filp, dev_priv->agp_heap ); \
radeon_mem_release( filp, dev_priv->fb_heap ); \
} \ } \
} while (0) } while (0)
/* On unloading the module: /* When the last client dies, shut down the CP and free dev->dev_priv.
* - Free memory heap structure
* - Remove mappings made at startup and free dev_private.
*/ */
#define DRIVER_PRETAKEDOWN() do { \ /* #define __HAVE_RELEASE 1 */
if ( dev->dev_private ) { \ #define DRIVER_PRETAKEDOWN() \
drm_radeon_private_t *dev_priv = dev->dev_private; \ do { \
radeon_mem_takedown( &(dev_priv->agp_heap) ); \ radeon_do_release( dev ); \
radeon_do_cleanup_cp( dev ); \
} \
} while (0) } while (0)
#else
#define __HAVE_DMA_IRQ 0
#endif
/* DMA customization: /* DMA customization:
*/ */
#define __HAVE_DMA 1 #define __HAVE_DMA 1
#define __HAVE_DMA_IRQ 1
#define __HAVE_VBL_IRQ 1
#define __HAVE_SHARED_IRQ 1
/* Buffer customization: /* Buffer customization:
......
...@@ -926,11 +926,11 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev, ...@@ -926,11 +926,11 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 ); RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 );
/* Writeback doesn't seem to work everywhere, test it first */ /* Writeback doesn't seem to work everywhere, test it first */
DRM_WRITE32( &dev_priv->scratch[1], 0 ); DRM_WRITE32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0 );
RADEON_WRITE( RADEON_SCRATCH_REG1, 0xdeadbeef ); RADEON_WRITE( RADEON_SCRATCH_REG1, 0xdeadbeef );
for ( tmp = 0 ; tmp < dev_priv->usec_timeout ; tmp++ ) { for ( tmp = 0 ; tmp < dev_priv->usec_timeout ; tmp++ ) {
if ( DRM_READ32( &dev_priv->scratch[1] ) == 0xdeadbeef ) if ( DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(1) ) == 0xdeadbeef )
break; break;
DRM_UDELAY( 1 ); DRM_UDELAY( 1 );
} }
...@@ -1217,6 +1217,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) ...@@ -1217,6 +1217,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
(dev_priv->ring.size / sizeof(u32)) - 1; (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
dev_priv->ring.ring_rptr = dev_priv->ring_rptr;
#if __REALLY_HAVE_SG #if __REALLY_HAVE_SG
if ( dev_priv->is_pci ) { if ( dev_priv->is_pci ) {
...@@ -1322,7 +1323,7 @@ int radeon_cp_start( DRM_IOCTL_ARGS ) ...@@ -1322,7 +1323,7 @@ int radeon_cp_start( DRM_IOCTL_ARGS )
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( dev_priv->cp_running ) { if ( dev_priv->cp_running ) {
DRM_DEBUG( "%s while CP running\n", __FUNCTION__ ); DRM_DEBUG( "%s while CP running\n", __FUNCTION__ );
...@@ -1350,10 +1351,13 @@ int radeon_cp_stop( DRM_IOCTL_ARGS ) ...@@ -1350,10 +1351,13 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
int ret; int ret;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) ); DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
if (!dev_priv->cp_running)
return 0;
/* Flush any pending CP commands. This ensures any outstanding /* Flush any pending CP commands. This ensures any outstanding
* commands are exectuted by the engine before we turn it off. * commands are exectuted by the engine before we turn it off.
*/ */
...@@ -1381,6 +1385,39 @@ int radeon_cp_stop( DRM_IOCTL_ARGS ) ...@@ -1381,6 +1385,39 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
return 0; return 0;
} }
void radeon_do_release( drm_device_t *dev )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int ret;
if (dev_priv) {
if (dev_priv->cp_running) {
/* Stop the cp */
while ((ret = radeon_do_cp_idle( dev_priv )) != 0) {
DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
schedule();
#else
tsleep(&ret, PZERO, "rdnrel", 1);
#endif
}
radeon_do_cp_stop( dev_priv );
radeon_do_engine_reset( dev );
}
/* Disable *all* interrupts */
RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
/* Free memory heap structures */
radeon_mem_takedown( &(dev_priv->agp_heap) );
radeon_mem_takedown( &(dev_priv->fb_heap) );
/* deallocate kernel resources */
radeon_do_cleanup_cp( dev );
}
}
/* Just reset the CP ring. Called as part of an X Server engine reset. /* Just reset the CP ring. Called as part of an X Server engine reset.
*/ */
int radeon_cp_reset( DRM_IOCTL_ARGS ) int radeon_cp_reset( DRM_IOCTL_ARGS )
...@@ -1389,7 +1426,7 @@ int radeon_cp_reset( DRM_IOCTL_ARGS ) ...@@ -1389,7 +1426,7 @@ int radeon_cp_reset( DRM_IOCTL_ARGS )
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_DEBUG( "%s called before init done\n", __FUNCTION__ ); DRM_DEBUG( "%s called before init done\n", __FUNCTION__ );
...@@ -1410,10 +1447,7 @@ int radeon_cp_idle( DRM_IOCTL_ARGS ) ...@@ -1410,10 +1447,7 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
/* if (dev->irq) */
/* radeon_emit_and_wait_irq( dev ); */
return radeon_do_cp_idle( dev_priv ); return radeon_do_cp_idle( dev_priv );
} }
...@@ -1423,7 +1457,7 @@ int radeon_engine_reset( DRM_IOCTL_ARGS ) ...@@ -1423,7 +1457,7 @@ int radeon_engine_reset( DRM_IOCTL_ARGS )
DRM_DEVICE; DRM_DEVICE;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
return radeon_do_engine_reset( dev ); return radeon_do_engine_reset( dev );
} }
...@@ -1482,7 +1516,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev ) ...@@ -1482,7 +1516,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
for ( i = start ; i < dma->buf_count ; i++ ) { for ( i = start ; i < dma->buf_count ; i++ ) {
buf = dma->buflist[i]; buf = dma->buflist[i];
buf_priv = buf->dev_private; buf_priv = buf->dev_private;
if ( buf->pid == 0 || (buf->pending && if ( buf->filp == 0 || (buf->pending &&
buf_priv->age <= done_age) ) { buf_priv->age <= done_age) ) {
dev_priv->stats.requested_bufs++; dev_priv->stats.requested_bufs++;
buf->pending = 0; buf->pending = 0;
...@@ -1509,7 +1543,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev ) ...@@ -1509,7 +1543,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
drm_buf_t *buf; drm_buf_t *buf;
int i, t; int i, t;
int start; int start;
u32 done_age = DRM_READ32(&dev_priv->scratch[1]); u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
if ( ++dev_priv->last_buf >= dma->buf_count ) if ( ++dev_priv->last_buf >= dma->buf_count )
dev_priv->last_buf = 0; dev_priv->last_buf = 0;
...@@ -1521,7 +1555,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev ) ...@@ -1521,7 +1555,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
for ( i = start ; i < dma->buf_count ; i++ ) { for ( i = start ; i < dma->buf_count ; i++ ) {
buf = dma->buflist[i]; buf = dma->buflist[i];
buf_priv = buf->dev_private; buf_priv = buf->dev_private;
if ( buf->pid == 0 || (buf->pending && if ( buf->filp == 0 || (buf->pending &&
buf_priv->age <= done_age) ) { buf_priv->age <= done_age) ) {
dev_priv->stats.requested_bufs++; dev_priv->stats.requested_bufs++;
buf->pending = 0; buf->pending = 0;
...@@ -1586,7 +1620,7 @@ int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n ) ...@@ -1586,7 +1620,7 @@ int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n )
return DRM_ERR(EBUSY); return DRM_ERR(EBUSY);
} }
static int radeon_cp_get_buffers( drm_device_t *dev, drm_dma_t *d ) static int radeon_cp_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
{ {
int i; int i;
drm_buf_t *buf; drm_buf_t *buf;
...@@ -1595,7 +1629,7 @@ static int radeon_cp_get_buffers( drm_device_t *dev, drm_dma_t *d ) ...@@ -1595,7 +1629,7 @@ static int radeon_cp_get_buffers( drm_device_t *dev, drm_dma_t *d )
buf = radeon_freelist_get( dev ); buf = radeon_freelist_get( dev );
if ( !buf ) return DRM_ERR(EBUSY); /* NOTE: broken client */ if ( !buf ) return DRM_ERR(EBUSY); /* NOTE: broken client */
buf->pid = DRM_CURRENTPID; buf->filp = filp;
if ( DRM_COPY_TO_USER( &d->request_indices[i], &buf->idx, if ( DRM_COPY_TO_USER( &d->request_indices[i], &buf->idx,
sizeof(buf->idx) ) ) sizeof(buf->idx) ) )
...@@ -1616,7 +1650,7 @@ int radeon_cp_buffers( DRM_IOCTL_ARGS ) ...@@ -1616,7 +1650,7 @@ int radeon_cp_buffers( DRM_IOCTL_ARGS )
int ret = 0; int ret = 0;
drm_dma_t d; drm_dma_t d;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) ); DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
...@@ -1639,7 +1673,7 @@ int radeon_cp_buffers( DRM_IOCTL_ARGS ) ...@@ -1639,7 +1673,7 @@ int radeon_cp_buffers( DRM_IOCTL_ARGS )
d.granted_count = 0; d.granted_count = 0;
if ( d.request_count ) { if ( d.request_count ) {
ret = radeon_cp_get_buffers( dev, &d ); ret = radeon_cp_get_buffers( filp, dev, &d );
} }
DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) ); DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
......
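The radeon_cp.c init hunk probes whether scratch-register writeback actually works: it clears the writeback slot, writes a sentinel through the register path, and polls the writeback page for it up to usec_timeout iterations before trusting GET_SCRATCH. A standalone sketch of that probe-with-timeout idea (the "hardware" is simulated by a plain variable; the timeout value and names are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t writeback_slot;   /* stands in for the writeback page */

    /* Pretend register write that the "hardware" mirrors into the slot. */
    static void write_scratch(uint32_t val)
    {
        writeback_slot = val;                  /* real hardware may or may not do this */
    }

    int main(void)
    {
        const int usec_timeout = 100;
        int works = 0;

        writeback_slot = 0;
        write_scratch(0xdeadbeef);

        for (int i = 0; i < usec_timeout; i++) {
            if (writeback_slot == 0xdeadbeef) {
                works = 1;
                break;
            }
            /* DRM_UDELAY(1) in the driver; omitted here */
        }

        printf("writeback %s\n", works ? "works" : "disabled");
        return 0;
    }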
...@@ -382,7 +382,7 @@ typedef struct { ...@@ -382,7 +382,7 @@ typedef struct {
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) #define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) #define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) #define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t) #define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t) #define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t) #define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52) #define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52)
...@@ -396,7 +396,7 @@ typedef struct drm_radeon_init { ...@@ -396,7 +396,7 @@ typedef struct drm_radeon_init {
enum { enum {
RADEON_INIT_CP = 0x01, RADEON_INIT_CP = 0x01,
RADEON_CLEANUP_CP = 0x02, RADEON_CLEANUP_CP = 0x02,
RADEON_INIT_R200_CP = 0x03, RADEON_INIT_R200_CP = 0x03
} func; } func;
unsigned long sarea_priv_offset; unsigned long sarea_priv_offset;
int is_pci; int is_pci;
......
...@@ -31,8 +31,8 @@ ...@@ -31,8 +31,8 @@
#ifndef __RADEON_DRV_H__ #ifndef __RADEON_DRV_H__
#define __RADEON_DRV_H__ #define __RADEON_DRV_H__
#define GET_RING_HEAD(ring) DRM_READ32( (volatile u32 *) (ring)->head ) #define GET_RING_HEAD(ring) DRM_READ32( (ring)->ring_rptr, 0 ) /* (ring)->head */
#define SET_RING_HEAD(ring,val) DRM_WRITE32( (volatile u32 *) (ring)->head , (val)) #define SET_RING_HEAD(ring,val) DRM_WRITE32( (ring)->ring_rptr, 0, (val) ) /* (ring)->head */
typedef struct drm_radeon_freelist { typedef struct drm_radeon_freelist {
unsigned int age; unsigned int age;
...@@ -53,6 +53,7 @@ typedef struct drm_radeon_ring_buffer { ...@@ -53,6 +53,7 @@ typedef struct drm_radeon_ring_buffer {
int space; int space;
int high_mark; int high_mark;
drm_local_map_t *ring_rptr;
} drm_radeon_ring_buffer_t; } drm_radeon_ring_buffer_t;
typedef struct drm_radeon_depth_clear_t { typedef struct drm_radeon_depth_clear_t {
...@@ -67,7 +68,7 @@ struct mem_block { ...@@ -67,7 +68,7 @@ struct mem_block {
struct mem_block *prev; struct mem_block *prev;
int start; int start;
int size; int size;
int pid; /* 0: free, -1: heap, other: real pids */ DRMFILE filp; /* 0: free, -1: heap, other: real files */
}; };
typedef struct drm_radeon_private { typedef struct drm_radeon_private {
...@@ -126,13 +127,13 @@ typedef struct drm_radeon_private { ...@@ -126,13 +127,13 @@ typedef struct drm_radeon_private {
drm_radeon_depth_clear_t depth_clear; drm_radeon_depth_clear_t depth_clear;
drm_map_t *sarea; drm_local_map_t *sarea;
drm_map_t *fb; drm_local_map_t *fb;
drm_map_t *mmio; drm_local_map_t *mmio;
drm_map_t *cp_ring; drm_local_map_t *cp_ring;
drm_map_t *ring_rptr; drm_local_map_t *ring_rptr;
drm_map_t *buffers; drm_local_map_t *buffers;
drm_map_t *agp_textures; drm_local_map_t *agp_textures;
struct mem_block *agp_heap; struct mem_block *agp_heap;
struct mem_block *fb_heap; struct mem_block *fb_heap;
...@@ -183,7 +184,7 @@ extern int radeon_mem_alloc( DRM_IOCTL_ARGS ); ...@@ -183,7 +184,7 @@ extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
extern int radeon_mem_free( DRM_IOCTL_ARGS ); extern int radeon_mem_free( DRM_IOCTL_ARGS );
extern int radeon_mem_init_heap( DRM_IOCTL_ARGS ); extern int radeon_mem_init_heap( DRM_IOCTL_ARGS );
extern void radeon_mem_takedown( struct mem_block **heap ); extern void radeon_mem_takedown( struct mem_block **heap );
extern void radeon_mem_release( struct mem_block *heap ); extern void radeon_mem_release( DRMFILE filp, struct mem_block *heap );
/* radeon_irq.c */ /* radeon_irq.c */
extern int radeon_irq_emit( DRM_IOCTL_ARGS ); extern int radeon_irq_emit( DRM_IOCTL_ARGS );
...@@ -193,6 +194,7 @@ extern int radeon_emit_and_wait_irq(drm_device_t *dev); ...@@ -193,6 +194,7 @@ extern int radeon_emit_and_wait_irq(drm_device_t *dev);
extern int radeon_wait_irq(drm_device_t *dev, int swi_nr); extern int radeon_wait_irq(drm_device_t *dev, int swi_nr);
extern int radeon_emit_irq(drm_device_t *dev); extern int radeon_emit_irq(drm_device_t *dev);
extern void radeon_do_release(drm_device_t *dev);
/* Flags for stats.boxes /* Flags for stats.boxes
*/ */
...@@ -266,8 +268,10 @@ extern int radeon_emit_irq(drm_device_t *dev); ...@@ -266,8 +268,10 @@ extern int radeon_emit_irq(drm_device_t *dev);
#define RADEON_SCRATCH_UMSK 0x0770 #define RADEON_SCRATCH_UMSK 0x0770
#define RADEON_SCRATCH_ADDR 0x0774 #define RADEON_SCRATCH_ADDR 0x0774
#define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x))
#define GET_SCRATCH( x ) (dev_priv->writeback_works \ #define GET_SCRATCH( x ) (dev_priv->writeback_works \
? DRM_READ32( &dev_priv->scratch[(x)] ) \ ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
: RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
...@@ -686,15 +690,10 @@ extern int radeon_emit_irq(drm_device_t *dev); ...@@ -686,15 +690,10 @@ extern int radeon_emit_irq(drm_device_t *dev);
#define RADEON_RING_HIGH_MARK 128 #define RADEON_RING_HIGH_MARK 128
#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define RADEON_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) #define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define RADEON_ADDR(reg) (RADEON_BASE( reg ) + reg) #define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
#define RADEON_READ(reg) DRM_READ32( (volatile u32 *) RADEON_ADDR(reg) )
#define RADEON_WRITE(reg,val) DRM_WRITE32( (volatile u32 *) RADEON_ADDR(reg), (val) )
#define RADEON_READ8(reg) DRM_READ8( (volatile u8 *) RADEON_ADDR(reg) )
#define RADEON_WRITE8(reg,val) DRM_WRITE8( (volatile u8 *) RADEON_ADDR(reg), (val) )
#define RADEON_WRITE_PLL( addr, val ) \ #define RADEON_WRITE_PLL( addr, val ) \
do { \ do { \
...@@ -771,16 +770,6 @@ extern int RADEON_READ_PLL( drm_device_t *dev, int addr ); ...@@ -771,16 +770,6 @@ extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
* Misc helper macros * Misc helper macros
*/ */
#define LOCK_TEST_WITH_RETURN( dev ) \
do { \
if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
dev->lock.pid != DRM_CURRENTPID ) { \
DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); \
return DRM_ERR(EINVAL); \
} \
} while (0)
/* Perfbox functionality only. /* Perfbox functionality only.
*/ */
#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
...@@ -823,13 +812,6 @@ do { \ ...@@ -823,13 +812,6 @@ do { \
* Ring control * Ring control
*/ */
#if defined(__powerpc__)
#define radeon_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring )
#else
#define radeon_flush_write_combine() DRM_WRITEMEMORYBARRIER()
#endif
#define RADEON_VERBOSE 0 #define RADEON_VERBOSE 0
#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring; #define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring;
...@@ -863,8 +845,13 @@ do { \ ...@@ -863,8 +845,13 @@ do { \
dev_priv->ring.tail = write; \ dev_priv->ring.tail = write; \
} while (0) } while (0)
#define COMMIT_RING() do { \ #define COMMIT_RING() do { \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \ /* Flush writes to ring */ \
DRM_READMEMORYBARRIER(dev_priv->mmio); \
GET_RING_HEAD( &dev_priv->ring ); \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
/* read from PCI bus to ensure correct posting */ \
RADEON_READ( RADEON_CP_RB_RPTR ); \
} while (0) } while (0)
#define OUT_RING( x ) do { \ #define OUT_RING( x ) do { \
......
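COMMIT_RING now reads the ring head through the writeback map, writes the new tail to RADEON_CP_RB_WPTR, and then reads RADEON_CP_RB_RPTR back so the posted write is pushed out on the PCI bus before the CPU continues. A very rough illustration of the "write, then read back from the same device" ordering idiom (volatile variables stand in for MMIO registers; this only shows the shape of the pattern, not real hardware access):

    #include <stdint.h>
    #include <stdio.h>

    /* Volatile stand-ins for the WPTR/RPTR registers of the sketch. */
    static volatile uint32_t rb_wptr;
    static volatile uint32_t rb_rptr;

    static void commit_ring(uint32_t new_tail)
    {
        rb_wptr = new_tail;          /* RADEON_WRITE(RADEON_CP_RB_WPTR, tail) */
        (void)rb_rptr;               /* read back to force the posted write out */
    }

    int main(void)
    {
        commit_ring(42);
        printf("tail now %u\n", (unsigned)rb_wptr);
        return 0;
    }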
...@@ -61,7 +61,11 @@ void DRM(dma_service)( DRM_IRQ_ARGS ) ...@@ -61,7 +61,11 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
(drm_radeon_private_t *)dev->dev_private; (drm_radeon_private_t *)dev->dev_private;
u32 stat; u32 stat;
stat = RADEON_READ(RADEON_GEN_INT_STATUS); /* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
stat = RADEON_READ(RADEON_GEN_INT_STATUS)
& (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT);
if (!stat) if (!stat)
return; return;
...@@ -77,15 +81,14 @@ void DRM(dma_service)( DRM_IRQ_ARGS ) ...@@ -77,15 +81,14 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
DRM(vbl_send_signals)( dev ); DRM(vbl_send_signals)( dev );
} }
/* Acknowledge all the bits in GEN_INT_STATUS -- seem to get /* Acknowledge interrupts we handle */
* more than we asked for...
*/
RADEON_WRITE(RADEON_GEN_INT_STATUS, stat); RADEON_WRITE(RADEON_GEN_INT_STATUS, stat);
} }
static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv) static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv)
{ {
u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS ); u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS )
& (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT);
if (tmp) if (tmp)
RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp ); RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp );
} }
...@@ -176,7 +179,7 @@ int radeon_irq_emit( DRM_IOCTL_ARGS ) ...@@ -176,7 +179,7 @@ int radeon_irq_emit( DRM_IOCTL_ARGS )
drm_radeon_irq_emit_t emit; drm_radeon_irq_emit_t emit;
int result; int result;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
......
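The interrupt hunks mask GEN_INT_STATUS down to the two bits the DRM actually services (the SW interrupt and the CRTC vblank status) before deciding whether the interrupt is ours and before acknowledging, so status bits that belong to users outside the DRM are left alone. A tiny sketch of that read-mask-ack shape (the register is simulated with a variable and the bit positions are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define SW_INT_TEST      (1u << 0)   /* placeholder bit values */
    #define CRTC_VBLANK_STAT (1u << 1)
    #define SOMEONE_ELSES    (1u << 7)

    static uint32_t gen_int_status = SW_INT_TEST | SOMEONE_ELSES;

    int main(void)
    {
        /* Only look at the bits we own. */
        uint32_t stat = gen_int_status & (SW_INT_TEST | CRTC_VBLANK_STAT);
        if (!stat)
            return 0;

        /* Acknowledge only what we handled; the foreign bit survives. */
        gen_int_status &= ~stat;
        printf("handled 0x%x, remaining 0x%x\n",
               (unsigned)stat, (unsigned)gen_int_status);
        return 0;
    }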
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
*/ */
static struct mem_block *split_block(struct mem_block *p, int start, int size, static struct mem_block *split_block(struct mem_block *p, int start, int size,
int pid ) DRMFILE filp )
{ {
/* Maybe cut off the start of an existing block */ /* Maybe cut off the start of an existing block */
if (start > p->start) { if (start > p->start) {
...@@ -49,7 +49,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, ...@@ -49,7 +49,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out; goto out;
newblock->start = start; newblock->start = start;
newblock->size = p->size - (start - p->start); newblock->size = p->size - (start - p->start);
newblock->pid = 0; newblock->filp = 0;
newblock->next = p->next; newblock->next = p->next;
newblock->prev = p; newblock->prev = p;
p->next->prev = newblock; p->next->prev = newblock;
...@@ -65,7 +65,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, ...@@ -65,7 +65,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out; goto out;
newblock->start = start + size; newblock->start = start + size;
newblock->size = p->size - size; newblock->size = p->size - size;
newblock->pid = 0; newblock->filp = 0;
newblock->next = p->next; newblock->next = p->next;
newblock->prev = p; newblock->prev = p;
p->next->prev = newblock; p->next->prev = newblock;
...@@ -75,20 +75,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, ...@@ -75,20 +75,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
out: out:
/* Our block is in the middle */ /* Our block is in the middle */
p->pid = pid; p->filp = filp;
return p; return p;
} }
static struct mem_block *alloc_block( struct mem_block *heap, int size, static struct mem_block *alloc_block( struct mem_block *heap, int size,
int align2, int pid ) int align2, DRMFILE filp )
{ {
struct mem_block *p; struct mem_block *p;
int mask = (1 << align2)-1; int mask = (1 << align2)-1;
for (p = heap->next ; p != heap ; p = p->next) { for (p = heap->next ; p != heap ; p = p->next) {
int start = (p->start + mask) & ~mask; int start = (p->start + mask) & ~mask;
if (p->pid == 0 && start + size <= p->start + p->size) if (p->filp == 0 && start + size <= p->start + p->size)
return split_block( p, start, size, pid ); return split_block( p, start, size, filp );
} }
return NULL; return NULL;
...@@ -108,25 +108,25 @@ static struct mem_block *find_block( struct mem_block *heap, int start ) ...@@ -108,25 +108,25 @@ static struct mem_block *find_block( struct mem_block *heap, int start )
static void free_block( struct mem_block *p ) static void free_block( struct mem_block *p )
{ {
p->pid = 0; p->filp = 0;
/* Assumes a single contiguous range. Needs a special pid in /* Assumes a single contiguous range. Needs a special filp in
* 'heap' to stop it being subsumed. * 'heap' to stop it being subsumed.
*/ */
if (p->next->pid == 0) { if (p->next->filp == 0) {
struct mem_block *q = p->next; struct mem_block *q = p->next;
p->size += q->size; p->size += q->size;
p->next = q->next; p->next = q->next;
p->next->prev = p; p->next->prev = p;
DRM_FREE(q); DRM_FREE(q, sizeof(*q));
} }
if (p->prev->pid == 0) { if (p->prev->filp == 0) {
struct mem_block *q = p->prev; struct mem_block *q = p->prev;
q->size += p->size; q->size += p->size;
q->next = p->next; q->next = p->next;
q->next->prev = q; q->next->prev = q;
DRM_FREE(p); DRM_FREE(p, sizeof(*q));
} }
} }
...@@ -141,47 +141,46 @@ static int init_heap(struct mem_block **heap, int start, int size) ...@@ -141,47 +141,46 @@ static int init_heap(struct mem_block **heap, int start, int size)
*heap = DRM_MALLOC(sizeof(**heap)); *heap = DRM_MALLOC(sizeof(**heap));
if (!*heap) { if (!*heap) {
DRM_FREE( blocks ); DRM_FREE( blocks, sizeof(*blocks) );
return -ENOMEM; return -ENOMEM;
} }
blocks->start = start; blocks->start = start;
blocks->size = size; blocks->size = size;
blocks->pid = 0; blocks->filp = 0;
blocks->next = blocks->prev = *heap; blocks->next = blocks->prev = *heap;
memset( *heap, 0, sizeof(**heap) ); memset( *heap, 0, sizeof(**heap) );
(*heap)->pid = -1; (*heap)->filp = (DRMFILE) -1;
(*heap)->next = (*heap)->prev = blocks; (*heap)->next = (*heap)->prev = blocks;
return 0; return 0;
} }
/* Free all blocks associated with the releasing pid. /* Free all blocks associated with the releasing file.
*/ */
void radeon_mem_release( struct mem_block *heap ) void radeon_mem_release( DRMFILE filp, struct mem_block *heap )
{ {
int pid = DRM_CURRENTPID;
struct mem_block *p; struct mem_block *p;
if (!heap || !heap->next) if (!heap || !heap->next)
return; return;
for (p = heap->next ; p != heap ; p = p->next) { for (p = heap->next ; p != heap ; p = p->next) {
if (p->pid == pid) if (p->filp == filp)
p->pid = 0; p->filp = 0;
} }
/* Assumes a single contiguous range. Needs a special pid in /* Assumes a single contiguous range. Needs a special filp in
* 'heap' to stop it being subsumed. * 'heap' to stop it being subsumed.
*/ */
for (p = heap->next ; p != heap ; p = p->next) { for (p = heap->next ; p != heap ; p = p->next) {
while (p->pid == 0 && p->next->pid == 0) { while (p->filp == 0 && p->next->filp == 0) {
struct mem_block *q = p->next; struct mem_block *q = p->next;
p->size += q->size; p->size += q->size;
p->next = q->next; p->next = q->next;
p->next->prev = p; p->next->prev = p;
DRM_FREE(q); DRM_FREE(q, sizeof(*q));
} }
} }
} }
...@@ -198,10 +197,10 @@ void radeon_mem_takedown( struct mem_block **heap ) ...@@ -198,10 +197,10 @@ void radeon_mem_takedown( struct mem_block **heap )
for (p = (*heap)->next ; p != *heap ; ) { for (p = (*heap)->next ; p != *heap ; ) {
struct mem_block *q = p; struct mem_block *q = p;
p = p->next; p = p->next;
DRM_FREE(q); DRM_FREE(q, sizeof(*q));
} }
DRM_FREE( *heap ); DRM_FREE( *heap, sizeof(**heap) );
*heap = 0; *heap = 0;
} }
...@@ -248,7 +247,7 @@ int radeon_mem_alloc( DRM_IOCTL_ARGS ) ...@@ -248,7 +247,7 @@ int radeon_mem_alloc( DRM_IOCTL_ARGS )
alloc.alignment = 12; alloc.alignment = 12;
block = alloc_block( *heap, alloc.size, alloc.alignment, block = alloc_block( *heap, alloc.size, alloc.alignment,
DRM_CURRENTPID ); filp );
if (!block) if (!block)
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
...@@ -287,7 +286,7 @@ int radeon_mem_free( DRM_IOCTL_ARGS ) ...@@ -287,7 +286,7 @@ int radeon_mem_free( DRM_IOCTL_ARGS )
if (!block) if (!block)
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
if (block->pid != DRM_CURRENTPID) if (block->filp != filp)
return DRM_ERR(EPERM); return DRM_ERR(EPERM);
free_block( block ); free_block( block );
......
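radeon_mem.c keeps a circular doubly-linked list of blocks per heap: alloc_block() does a first-fit search over blocks whose filp is 0, split_block() carves the allocation out of a free block, and free_block()/radeon_mem_release() merge a freed block with free neighbours. The heap sentinel carries filp == -1 so it is never merged away. A compact standalone sketch of that split/coalesce scheme (simplified: no alignment handling, the owner is a plain void pointer, and the list is anchored on a sentinel just as in the driver):

    #include <stdio.h>
    #include <stdlib.h>

    struct block {
        struct block *next, *prev;
        int start, size;
        void *owner;                 /* NULL: free, (void *)-1: heap sentinel */
    };

    /* Carve 'size' out of free block p at p->start, leaving any tail free. */
    static struct block *split_block(struct block *p, int size, void *owner)
    {
        if (size < p->size) {
            struct block *tail = malloc(sizeof(*tail));
            if (!tail)
                return NULL;
            tail->start = p->start + size;
            tail->size  = p->size - size;
            tail->owner = NULL;
            tail->next  = p->next;
            tail->prev  = p;
            p->next->prev = tail;
            p->next = tail;
            p->size = size;
        }
        p->owner = owner;
        return p;
    }

    static struct block *alloc_block(struct block *heap, int size, void *owner)
    {
        for (struct block *p = heap->next; p != heap; p = p->next)
            if (p->owner == NULL && p->size >= size)
                return split_block(p, size, owner);
        return NULL;
    }

    /* Free and merge with free neighbours, as free_block() does. */
    static void free_block(struct block *p)
    {
        p->owner = NULL;
        if (p->next->owner == NULL) {
            struct block *q = p->next;
            p->size += q->size;
            p->next = q->next;
            p->next->prev = p;
            free(q);
        }
        if (p->prev->owner == NULL) {
            struct block *q = p->prev;
            q->size += p->size;
            q->next = p->next;
            q->next->prev = q;
            free(p);
        }
    }

    int main(void)
    {
        struct block heap = { 0 };                 /* sentinel, never merged */
        struct block *all = malloc(sizeof(*all));
        if (!all)
            return 1;
        all->start = 0; all->size = 1024; all->owner = NULL;
        heap.owner = (void *)-1;
        heap.next = heap.prev = all;
        all->next = all->prev = &heap;

        struct block *a = alloc_block(&heap, 256, (void *)0x1);
        struct block *b = alloc_block(&heap, 128, (void *)0x1);
        if (!a || !b)
            return 1;
        printf("a at %d, b at %d\n", a->start, b->start);
        free_block(a);
        free_block(b);                             /* coalesces back to 1024 */
        printf("free block size %d\n", heap.next->size);
        return 0;
    }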
...@@ -1063,7 +1063,8 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev, ...@@ -1063,7 +1063,8 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
static int radeon_cp_dispatch_texture( drm_device_t *dev, static int radeon_cp_dispatch_texture( DRMFILE filp,
drm_device_t *dev,
drm_radeon_texture_t *tex, drm_radeon_texture_t *tex,
drm_radeon_tex_image_t *image ) drm_radeon_tex_image_t *image )
{ {
...@@ -1073,7 +1074,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev, ...@@ -1073,7 +1074,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
u32 *buffer; u32 *buffer;
const u8 *data; const u8 *data;
int size, dwords, tex_width, blit_width; int size, dwords, tex_width, blit_width;
u32 y, height; u32 height;
int i; int i;
RING_LOCALS; RING_LOCALS;
...@@ -1138,10 +1139,9 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev, ...@@ -1138,10 +1139,9 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
tex->offset >> 10, tex->pitch, tex->format, tex->offset >> 10, tex->pitch, tex->format,
image->x, image->y, image->width, image->height ); image->x, image->y, image->width, image->height );
/* Make a copy of the parameters in case we have to /* Make a copy of some parameters in case we have to
* update them for a multi-pass texture blit. * update them for a multi-pass texture blit.
*/ */
y = image->y;
height = image->height; height = image->height;
data = (const u8 *)image->data; data = (const u8 *)image->data;
...@@ -1156,11 +1156,6 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev, ...@@ -1156,11 +1156,6 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
return 0; return 0;
} }
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
image->data += size;
buf = radeon_freelist_get( dev ); buf = radeon_freelist_get( dev );
if ( 0 && !buf ) { if ( 0 && !buf ) {
radeon_do_cp_idle( dev_priv ); radeon_do_cp_idle( dev_priv );
...@@ -1190,7 +1185,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev, ...@@ -1190,7 +1185,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
buffer[2] = (tex->pitch << 22) | (tex->offset >> 10); buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
buffer[3] = 0xffffffff; buffer[3] = 0xffffffff;
buffer[4] = 0xffffffff; buffer[4] = 0xffffffff;
buffer[5] = (y << 16) | image->x; buffer[5] = (image->y << 16) | image->x;
buffer[6] = (height << 16) | image->width; buffer[6] = (height << 16) | image->width;
buffer[7] = dwords; buffer[7] = dwords;
buffer += 8; buffer += 8;
...@@ -1222,11 +1217,15 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev, ...@@ -1222,11 +1217,15 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
} }
} }
buf->pid = DRM_CURRENTPID; buf->filp = filp;
buf->used = (dwords + 8) * sizeof(u32); buf->used = (dwords + 8) * sizeof(u32);
radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
radeon_cp_discard_buffer( dev, buf ); radeon_cp_discard_buffer( dev, buf );
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
(const u8 *)image->data += size;
} while (image->height > 0); } while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures /* Flush the pixel cache after the blit completes. This ensures
...@@ -1275,7 +1274,7 @@ int radeon_cp_clear( DRM_IOCTL_ARGS ) ...@@ -1275,7 +1274,7 @@ int radeon_cp_clear( DRM_IOCTL_ARGS )
drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data, DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data,
sizeof(clear) ); sizeof(clear) );
...@@ -1344,7 +1343,7 @@ int radeon_cp_flip( DRM_IOCTL_ARGS ) ...@@ -1344,7 +1343,7 @@ int radeon_cp_flip( DRM_IOCTL_ARGS )
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
RING_SPACE_TEST_WITH_RETURN( dev_priv ); RING_SPACE_TEST_WITH_RETURN( dev_priv );
...@@ -1364,7 +1363,7 @@ int radeon_cp_swap( DRM_IOCTL_ARGS ) ...@@ -1364,7 +1363,7 @@ int radeon_cp_swap( DRM_IOCTL_ARGS )
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
RING_SPACE_TEST_WITH_RETURN( dev_priv ); RING_SPACE_TEST_WITH_RETURN( dev_priv );
...@@ -1388,7 +1387,7 @@ int radeon_cp_vertex( DRM_IOCTL_ARGS ) ...@@ -1388,7 +1387,7 @@ int radeon_cp_vertex( DRM_IOCTL_ARGS )
drm_radeon_vertex_t vertex; drm_radeon_vertex_t vertex;
drm_radeon_tcl_prim_t prim; drm_radeon_tcl_prim_t prim;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
...@@ -1418,9 +1417,9 @@ int radeon_cp_vertex( DRM_IOCTL_ARGS ) ...@@ -1418,9 +1417,9 @@ int radeon_cp_vertex( DRM_IOCTL_ARGS )
buf = dma->buflist[vertex.idx]; buf = dma->buflist[vertex.idx];
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "process %d using buffer owned by %d\n", DRM_ERROR( "process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID, buf->filp );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
if ( buf->pending ) { if ( buf->pending ) {
...@@ -1475,7 +1474,7 @@ int radeon_cp_indices( DRM_IOCTL_ARGS ) ...@@ -1475,7 +1474,7 @@ int radeon_cp_indices( DRM_IOCTL_ARGS )
drm_radeon_tcl_prim_t prim; drm_radeon_tcl_prim_t prim;
int count; int count;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
...@@ -1505,9 +1504,9 @@ int radeon_cp_indices( DRM_IOCTL_ARGS ) ...@@ -1505,9 +1504,9 @@ int radeon_cp_indices( DRM_IOCTL_ARGS )
buf = dma->buflist[elts.idx]; buf = dma->buflist[elts.idx];
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "process %d using buffer owned by %d\n", DRM_ERROR( "process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID, buf->filp );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
if ( buf->pending ) { if ( buf->pending ) {
...@@ -1570,7 +1569,7 @@ int radeon_cp_texture( DRM_IOCTL_ARGS ) ...@@ -1570,7 +1569,7 @@ int radeon_cp_texture( DRM_IOCTL_ARGS )
drm_radeon_tex_image_t image; drm_radeon_tex_image_t image;
int ret; int ret;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data, sizeof(tex) ); DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data, sizeof(tex) );
...@@ -1587,7 +1586,7 @@ int radeon_cp_texture( DRM_IOCTL_ARGS ) ...@@ -1587,7 +1586,7 @@ int radeon_cp_texture( DRM_IOCTL_ARGS )
RING_SPACE_TEST_WITH_RETURN( dev_priv ); RING_SPACE_TEST_WITH_RETURN( dev_priv );
VB_AGE_TEST_WITH_RETURN( dev_priv ); VB_AGE_TEST_WITH_RETURN( dev_priv );
ret = radeon_cp_dispatch_texture( dev, &tex, &image ); ret = radeon_cp_dispatch_texture( filp, dev, &tex, &image );
COMMIT_RING(); COMMIT_RING();
return ret; return ret;
...@@ -1600,7 +1599,7 @@ int radeon_cp_stipple( DRM_IOCTL_ARGS ) ...@@ -1600,7 +1599,7 @@ int radeon_cp_stipple( DRM_IOCTL_ARGS )
drm_radeon_stipple_t stipple; drm_radeon_stipple_t stipple;
u32 mask[32]; u32 mask[32];
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data, DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data,
sizeof(stipple) ); sizeof(stipple) );
...@@ -1625,7 +1624,7 @@ int radeon_cp_indirect( DRM_IOCTL_ARGS ) ...@@ -1625,7 +1624,7 @@ int radeon_cp_indirect( DRM_IOCTL_ARGS )
drm_radeon_indirect_t indirect; drm_radeon_indirect_t indirect;
RING_LOCALS; RING_LOCALS;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
...@@ -1647,9 +1646,9 @@ int radeon_cp_indirect( DRM_IOCTL_ARGS ) ...@@ -1647,9 +1646,9 @@ int radeon_cp_indirect( DRM_IOCTL_ARGS )
buf = dma->buflist[indirect.idx]; buf = dma->buflist[indirect.idx];
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "process %d using buffer owned by %d\n", DRM_ERROR( "process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID, buf->filp );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
if ( buf->pending ) { if ( buf->pending ) {
...@@ -1702,7 +1701,7 @@ int radeon_cp_vertex2( DRM_IOCTL_ARGS ) ...@@ -1702,7 +1701,7 @@ int radeon_cp_vertex2( DRM_IOCTL_ARGS )
int i; int i;
unsigned char laststate; unsigned char laststate;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
...@@ -1727,9 +1726,9 @@ int radeon_cp_vertex2( DRM_IOCTL_ARGS ) ...@@ -1727,9 +1726,9 @@ int radeon_cp_vertex2( DRM_IOCTL_ARGS )
buf = dma->buflist[vertex.idx]; buf = dma->buflist[vertex.idx];
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "process %d using buffer owned by %d\n", DRM_ERROR( "process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID, buf->filp );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
...@@ -2029,7 +2028,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS ) ...@@ -2029,7 +2028,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
drm_radeon_cmd_header_t header; drm_radeon_cmd_header_t header;
int orig_nbox; int orig_nbox;
LOCK_TEST_WITH_RETURN( dev ); LOCK_TEST_WITH_RETURN( dev, filp );
if ( !dev_priv ) { if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
...@@ -2098,8 +2097,9 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS ) ...@@ -2098,8 +2097,9 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
} }
buf = dma->buflist[idx]; buf = dma->buflist[idx];
if ( buf->pid != DRM_CURRENTPID || buf->pending ) { if ( buf->filp != filp || buf->pending ) {
DRM_ERROR( "bad buffer\n" ); DRM_ERROR( "bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
......
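The texture-upload hunk moves the "advance y/height/data" bookkeeping to after a pass has actually been dispatched, so a failed or interrupted pass no longer clobbers the caller's parameters; radeon_cp_dispatch_texture() then loops until image->height reaches zero. A small sketch of that chunked-upload loop (a plain memcpy stands in for the blit, and the per-pass row limit is made up for the example):

    #include <stdio.h>
    #include <string.h>

    #define MAX_ROWS_PER_PASS 4        /* placeholder for the per-buffer limit */

    struct image {
        int y, height, width;
        const unsigned char *data;
    };

    /* Upload in passes, advancing the parameters only after a pass succeeds. */
    static void upload(unsigned char *dst, int pitch, struct image *img)
    {
        do {
            int rows = img->height > MAX_ROWS_PER_PASS ? MAX_ROWS_PER_PASS
                                                       : img->height;
            size_t size = (size_t)rows * img->width;

            /* "dispatch" this pass */
            memcpy(dst + (size_t)img->y * pitch, img->data, size);

            /* Update the input parameters for next time -- after the dispatch. */
            img->y      += rows;
            img->height -= rows;
            img->data   += size;
        } while (img->height > 0);
    }

    int main(void)
    {
        unsigned char src[10 * 8], dst[10 * 8] = { 0 };
        for (int i = 0; i < 10 * 8; i++)
            src[i] = (unsigned char)i;

        struct image img = { 0, 10, 8, src };
        upload(dst, 8, &img);
        printf("%s\n", memcmp(src, dst, sizeof(src)) == 0 ? "match" : "mismatch");
        return 0;
    }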
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
* DEALINGS IN THE SOFTWARE. * DEALINGS IN THE SOFTWARE.
* *
*/ */
/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.2 2001/12/19 21:25:59 dawes Exp $ */ /* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.3 2002/10/30 12:52:38 alanh Exp $ */
#ifndef __SIS_H__ #ifndef __SIS_H__
#define __SIS_H__ #define __SIS_H__
......
...@@ -182,10 +182,10 @@ int sisp_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd, ...@@ -182,10 +182,10 @@ int sisp_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
if(block){ if(block){
/* TODO */ /* TODO */
agp.offset = block->ofs; agp.offset = block->ofs;
agp.free = (unsigned int)block; agp.free = (unsigned long)block;
if(!add_alloc_set(agp.context, AGP_TYPE, agp.free)){ if(!add_alloc_set(agp.context, AGP_TYPE, agp.free)){
DRM_DEBUG("adding to allocation set fails\n"); DRM_DEBUG("adding to allocation set fails\n");
mmFreeMem((PMemBlock)agp.free); mmFreeMem((PMemBlock)(unsigned long)agp.free);
retval = -1; retval = -1;
} }
} }
...@@ -218,7 +218,7 @@ int sisp_agp_free(struct inode *inode, struct file *filp, unsigned int cmd, ...@@ -218,7 +218,7 @@ int sisp_agp_free(struct inode *inode, struct file *filp, unsigned int cmd,
return -1; return -1;
} }
mmFreeMem((PMemBlock)agp.free); mmFreeMem((PMemBlock)(unsigned long)agp.free);
if(!del_alloc_set(agp.context, AGP_TYPE, agp.free)) if(!del_alloc_set(agp.context, AGP_TYPE, agp.free))
retval = -1; retval = -1;
...@@ -288,7 +288,7 @@ int sis_final_context(int context) ...@@ -288,7 +288,7 @@ int sis_final_context(int context)
retval = setFirst(set, &item); retval = setFirst(set, &item);
while(retval){ while(retval){
DRM_DEBUG("free agp memory 0x%x\n", item); DRM_DEBUG("free agp memory 0x%x\n", item);
mmFreeMem((PMemBlock)item); mmFreeMem((PMemBlock)(unsigned long)item);
retval = setNext(set, &item); retval = setNext(set, &item);
} }
setDestroy(set); setDestroy(set);
......