Commit 2591939e authored by Rob Clark, committed by Dmitry Osipenko

drm/virtio: Spiff out cmd queue/response traces

Add a sequence number to make it easier to match up cmd/resp pairs, and
report the number of free slots in the virtqueue to make starvation
issues easier to spot.

v2: Fix handling of string fields as well
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221130000841.318037-1-robdclark@gmail.com
parent 0974687a
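With this change each queued command and its eventual response are traced with the same seqno, and queue-time events also record how many descriptors were free, so matched pairs and starvation are visible directly in the trace. Purely as an illustration of how that might read (the event names and printk format come from the diff below; the tracefs path assumes the usual mount point and the virtio_gpu trace system defined elsewhere in virtgpu_trace.h, and every field value is made up for the sketch):

    # enable the trace events (path is an assumption: standard tracefs mount)
    echo 1 > /sys/kernel/tracing/events/virtio_gpu/enable

    # hypothetical excerpt -- all values invented for illustration
    virtio_gpu_cmd_queue:    vdev=0 vq=0 name=control type=0x105 flags=0x1 fence_id=42 ctx_id=3 num_free=12 seqno=1337
    virtio_gpu_cmd_response: vdev=0 vq=0 name=control type=0x1100 flags=0x0 fence_id=42 ctx_id=3 num_free=13 seqno=1337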
@@ -165,6 +165,8 @@ struct virtio_gpu_vbuffer {
 
         struct virtio_gpu_object_array *objs;
         struct list_head list;
+
+        uint32_t seqno;
 };
 
 struct virtio_gpu_output {
@@ -194,6 +196,7 @@ struct virtio_gpu_queue {
         spinlock_t qlock;
         wait_queue_head_t ack_queue;
         struct work_struct dequeue_work;
+        uint32_t seqno;
 };
 
 struct virtio_gpu_drv_capset {
......
@@ -9,40 +9,44 @@
 #define TRACE_INCLUDE_FILE virtgpu_trace
 
 DECLARE_EVENT_CLASS(virtio_gpu_cmd,
-        TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
-        TP_ARGS(vq, hdr),
+        TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+        TP_ARGS(vq, hdr, seqno),
         TP_STRUCT__entry(
                          __field(int, dev)
                          __field(unsigned int, vq)
-                         __field(const char *, name)
+                         __string(name, vq->name)
                          __field(u32, type)
                          __field(u32, flags)
                          __field(u64, fence_id)
                          __field(u32, ctx_id)
+                         __field(u32, num_free)
+                         __field(u32, seqno)
                          ),
         TP_fast_assign(
                        __entry->dev = vq->vdev->index;
                        __entry->vq = vq->index;
-                       __entry->name = vq->name;
+                       __assign_str(name, vq->name);
                        __entry->type = le32_to_cpu(hdr->type);
                        __entry->flags = le32_to_cpu(hdr->flags);
                        __entry->fence_id = le64_to_cpu(hdr->fence_id);
                        __entry->ctx_id = le32_to_cpu(hdr->ctx_id);
+                       __entry->num_free = vq->num_free;
+                       __entry->seqno = seqno;
                        ),
-        TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u",
-                  __entry->dev, __entry->vq, __entry->name,
-                  __entry->type, __entry->flags, __entry->fence_id,
-                  __entry->ctx_id)
+        TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u num_free=%u seqno=%u",
+                  __entry->dev, __entry->vq, __get_str(name),
+                  __entry->type, __entry->flags, __entry->fence_id,
+                  __entry->ctx_id, __entry->num_free, __entry->seqno)
 );
 
 DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue,
-        TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
-        TP_ARGS(vq, hdr)
+        TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+        TP_ARGS(vq, hdr, seqno)
 );
 
 DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response,
-        TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
-        TP_ARGS(vq, hdr)
+        TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+        TP_ARGS(vq, hdr, seqno)
 );
 
 #endif
......
@@ -215,7 +215,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         list_for_each_entry(entry, &reclaim_list, list) {
                 resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
 
-                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
 
                 if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                         if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
@@ -261,6 +261,10 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
         spin_unlock(&vgdev->cursorq.qlock);
 
         list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+                struct virtio_gpu_ctrl_hdr *resp =
+                        (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+                trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
                 list_del(&entry->list);
                 free_vbuf(vgdev, entry);
         }
@@ -353,7 +357,8 @@ static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
         ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
         WARN_ON(ret);
 
-        trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+        vbuf->seqno = ++vgdev->ctrlq.seqno;
+        trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
 
         atomic_inc(&vgdev->pending_commands);
@@ -465,8 +470,10 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                 spin_lock(&vgdev->cursorq.qlock);
                 goto retry;
         } else {
+                vbuf->seqno = ++vgdev->cursorq.seqno;
                 trace_virtio_gpu_cmd_queue(vq,
-                                           virtio_gpu_vbuf_ctrl_hdr(vbuf));
+                                           virtio_gpu_vbuf_ctrl_hdr(vbuf),
+                                           vbuf->seqno);
 
                 notify = virtqueue_kick_prepare(vq);
         }
......