Commit 57c5ee79 authored by Thomas Hellstrom, committed by Dave Airlie

vmwgfx: Add fence events

Add a way to send DRM events down the gpu fifo by attaching them to
fence objects. This may be useful for Xserver swapbuffer throttling and
page-flip done notifications.

Bump version to 2.2 to signal the availability of the FENCE_EVENT ioctl.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 8bf445ce
...@@ -94,6 +94,9 @@ ...@@ -94,6 +94,9 @@
#define DRM_IOCTL_VMW_FENCE_UNREF \ #define DRM_IOCTL_VMW_FENCE_UNREF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
struct drm_vmw_fence_arg) struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \ #define DRM_IOCTL_VMW_PRESENT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
struct drm_vmw_present_arg) struct drm_vmw_present_arg)
...@@ -150,6 +153,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { ...@@ -150,6 +153,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_EVENT,
vmw_fence_event_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED),
......
...@@ -40,9 +40,9 @@ ...@@ -40,9 +40,9 @@
#include "ttm/ttm_module.h" #include "ttm/ttm_module.h"
#include "vmwgfx_fence.h" #include "vmwgfx_fence.h"
#define VMWGFX_DRIVER_DATE "20110927" #define VMWGFX_DRIVER_DATE "20111008"
#define VMWGFX_DRIVER_MAJOR 2 #define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 1 #define VMWGFX_DRIVER_MINOR 2
#define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
...@@ -264,10 +264,12 @@ struct vmw_private { ...@@ -264,10 +264,12 @@ struct vmw_private {
wait_queue_head_t fence_queue; wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue; wait_queue_head_t fifo_queue;
int fence_queue_waiters; /* Protected by hw_mutex */ int fence_queue_waiters; /* Protected by hw_mutex */
int goal_queue_waiters; /* Protected by hw_mutex */
atomic_t fifo_queue_waiters; atomic_t fifo_queue_waiters;
uint32_t last_read_seqno; uint32_t last_read_seqno;
spinlock_t irq_lock; spinlock_t irq_lock;
struct vmw_fence_manager *fman; struct vmw_fence_manager *fman;
uint32_t irq_mask;
/* /*
* Device state * Device state
...@@ -532,7 +534,13 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, ...@@ -532,7 +534,13 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv, struct vmw_private *dev_priv,
struct vmw_fence_obj **p_fence, struct vmw_fence_obj **p_fence,
uint32_t *p_handle); uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp,
int ret,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle);
/** /**
* IRQs and wating - vmwgfx_irq.c * IRQs and wating - vmwgfx_irq.c
...@@ -557,6 +565,8 @@ extern void vmw_update_seqno(struct vmw_private *dev_priv, ...@@ -557,6 +565,8 @@ extern void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state); struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv); extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
/** /**
* Rudimentary fence-like objects currently used only for throttling - * Rudimentary fence-like objects currently used only for throttling -
......
...@@ -1057,7 +1057,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, ...@@ -1057,7 +1057,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* object so we wait for it immediately, and then unreference the * object so we wait for it immediately, and then unreference the
* user-space reference. * user-space reference.
*/ */
static void void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp, struct vmw_fpriv *vmw_fp,
int ret, int ret,
......
This diff is collapsed.
...@@ -37,8 +37,14 @@ struct vmw_fence_manager; ...@@ -37,8 +37,14 @@ struct vmw_fence_manager;
* *
* *
*/ */
enum vmw_action_type {
VMW_ACTION_EVENT = 0,
VMW_ACTION_MAX
};
struct vmw_fence_action { struct vmw_fence_action {
struct list_head head; struct list_head head;
enum vmw_action_type type;
void (*seq_passed) (struct vmw_fence_action *action); void (*seq_passed) (struct vmw_fence_action *action);
void (*cleanup) (struct vmw_fence_action *action); void (*cleanup) (struct vmw_fence_action *action);
}; };
...@@ -66,8 +72,7 @@ extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p); ...@@ -66,8 +72,7 @@ extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
extern struct vmw_fence_obj * extern struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence); vmw_fence_obj_reference(struct vmw_fence_obj *fence);
extern void vmw_fences_update(struct vmw_fence_manager *fman, extern void vmw_fences_update(struct vmw_fence_manager *fman);
u32 sequence);
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence, extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
uint32_t flags); uint32_t flags);
...@@ -102,4 +107,7 @@ extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, ...@@ -102,4 +107,7 @@ extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif /* _VMWGFX_FENCE_H_ */ #endif /* _VMWGFX_FENCE_H_ */
...@@ -247,9 +247,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, ...@@ -247,9 +247,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS, outl(SVGA_IRQFLAG_FIFO_PROGRESS,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_read(dev_priv, SVGA_REG_IRQMASK) | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
SVGA_IRQFLAG_FIFO_PROGRESS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); mutex_unlock(&dev_priv->hw_mutex);
...@@ -271,9 +270,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, ...@@ -271,9 +270,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->hw_mutex); mutex_lock(&dev_priv->hw_mutex);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_read(dev_priv, SVGA_REG_IRQMASK) & vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
~SVGA_IRQFLAG_FIFO_PROGRESS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); mutex_unlock(&dev_priv->hw_mutex);
......
...@@ -34,28 +34,30 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS) ...@@ -34,28 +34,30 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{ {
struct drm_device *dev = (struct drm_device *)arg; struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status; uint32_t status, masked_status;
spin_lock(&dev_priv->irq_lock); spin_lock(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
masked_status = status & dev_priv->irq_mask;
spin_unlock(&dev_priv->irq_lock); spin_unlock(&dev_priv->irq_lock);
if (status & SVGA_IRQFLAG_ANY_FENCE) { if (likely(status))
__le32 __iomem *fifo_mem = dev_priv->mmio_virt; outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
vmw_fences_update(dev_priv->fman, seqno); if (!masked_status)
return IRQ_NONE;
if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
SVGA_IRQFLAG_FENCE_GOAL)) {
vmw_fences_update(dev_priv->fman);
wake_up_all(&dev_priv->fence_queue); wake_up_all(&dev_priv->fence_queue);
} }
if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue); wake_up_all(&dev_priv->fifo_queue);
if (likely(status)) {
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
return IRQ_HANDLED;
}
return IRQ_NONE; return IRQ_HANDLED;
} }
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
...@@ -78,7 +80,7 @@ void vmw_update_seqno(struct vmw_private *dev_priv, ...@@ -78,7 +80,7 @@ void vmw_update_seqno(struct vmw_private *dev_priv,
if (dev_priv->last_read_seqno != seqno) { if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno; dev_priv->last_read_seqno = seqno;
vmw_marker_pull(&fifo_state->marker_queue, seqno); vmw_marker_pull(&fifo_state->marker_queue, seqno);
vmw_fences_update(dev_priv->fman, seqno); vmw_fences_update(dev_priv->fman);
} }
} }
...@@ -189,9 +191,8 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) ...@@ -189,9 +191,8 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_ANY_FENCE, outl(SVGA_IRQFLAG_ANY_FENCE,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
vmw_read(dev_priv, SVGA_REG_IRQMASK) | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
SVGA_IRQFLAG_ANY_FENCE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); mutex_unlock(&dev_priv->hw_mutex);
...@@ -204,9 +205,39 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) ...@@ -204,9 +205,39 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
unsigned long irq_flags; unsigned long irq_flags;
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
vmw_read(dev_priv, SVGA_REG_IRQMASK) & vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
~SVGA_IRQFLAG_ANY_FENCE); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
}
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->hw_mutex);
if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags;
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FENCE_GOAL,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->hw_mutex);
if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags;
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); mutex_unlock(&dev_priv->hw_mutex);
......
...@@ -683,6 +683,51 @@ struct drm_vmw_fence_arg { ...@@ -683,6 +683,51 @@ struct drm_vmw_fence_arg {
}; };
/*************************************************************************/
/**
* DRM_VMW_FENCE_EVENT
*
* Queues an event on a fence to be delivered on the drm character device
* when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
* Optionally the approximate time when the fence signaled is
* given by the event.
*/
/*
* The event type
*/
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
struct drm_vmw_event_fence {
struct drm_event base;
uint64_t user_data;
uint32_t tv_sec;
uint32_t tv_usec;
};
/*
* Flags that may be given to the command.
*/
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
/**
* struct drm_vmw_fence_event_arg
*
* @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
* the fence is not supposed to be referenced by user-space.
* @user_info: Info to be delivered with the event.
* @handle: Attach the event to this fence only.
* @flags: A set of flags as defined above.
*/
struct drm_vmw_fence_event_arg {
uint64_t fence_rep;
uint64_t user_data;
uint32_t handle;
uint32_t flags;
};
/*************************************************************************/ /*************************************************************************/
/** /**
* DRM_VMW_PRESENT * DRM_VMW_PRESENT
...@@ -743,6 +788,4 @@ struct drm_vmw_present_readback_arg { ...@@ -743,6 +788,4 @@ struct drm_vmw_present_readback_arg {
uint64_t clips_ptr; uint64_t clips_ptr;
uint64_t fence_rep; uint64_t fence_rep;
}; };
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment