Commit af334c5d authored by Gerd Hoffmann

drm/virtio: pass virtio_gpu_object to virtio_gpu_cmd_transfer_to_host_{2d, 3d}

Pass virtio_gpu_object down to virtio_gpu_cmd_transfer_to_host_2d and
virtio_gpu_cmd_transfer_to_host_3d functions, instead of passing just
the virtio resource handle.

This is needed to lookup the scatter list of the object, for dma sync.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Jiandi An <jiandi.an@amd.com>
Tested-by: Jiandi An <jiandi.an@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180920062924.6514-1-kraxel@redhat.com
parent c3a8d6ea
@@ -270,7 +270,8 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					uint32_t resource_id, uint64_t offset,
+					struct virtio_gpu_object *bo,
+					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
 					struct virtio_gpu_fence **fence);
@@ -316,7 +317,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  struct virtio_gpu_box *box,
 					  struct virtio_gpu_fence **fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					uint32_t resource_id, uint32_t ctx_id,
+					struct virtio_gpu_object *bo,
+					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
 					struct virtio_gpu_fence **fence);
......
@@ -95,7 +95,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
 	offset = (y * fb->base.pitches[0]) + x * bpp;
-	virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
+	virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj,
 					   offset,
 					   cpu_to_le32(w),
 					   cpu_to_le32(h),
......
...@@ -429,11 +429,11 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, ...@@ -429,11 +429,11 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
convert_to_hw_box(&box, &args->box); convert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) { if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d virtio_gpu_cmd_transfer_to_host_2d
(vgdev, qobj->hw_res_handle, offset, (vgdev, qobj, offset,
box.w, box.h, box.x, box.y, NULL); box.w, box.h, box.x, box.y, NULL);
} else { } else {
virtio_gpu_cmd_transfer_to_host_3d virtio_gpu_cmd_transfer_to_host_3d
(vgdev, qobj->hw_res_handle, (vgdev, qobj,
vfpriv ? vfpriv->ctx_id : 0, offset, vfpriv ? vfpriv->ctx_id : 0, offset,
args->level, &box, &fence); args->level, &box, &fence);
reservation_object_add_excl_fence(qobj->tbo.resv, reservation_object_add_excl_fence(qobj->tbo.resv,
......
@@ -158,7 +158,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 	handle = bo->hw_res_handle;
 	if (bo->dumb) {
 		virtio_gpu_cmd_transfer_to_host_2d
-			(vgdev, handle, 0,
+			(vgdev, bo, 0,
 			 cpu_to_le32(plane->state->src_w >> 16),
 			 cpu_to_le32(plane->state->src_h >> 16),
 			 cpu_to_le32(plane->state->src_x >> 16),
@@ -217,7 +217,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
 		/* new cursor -- update & wait */
 		virtio_gpu_cmd_transfer_to_host_2d
-			(vgdev, handle, 0,
+			(vgdev, bo, 0,
 			 cpu_to_le32(plane->state->crtc_w),
 			 cpu_to_le32(plane->state->crtc_h),
 			 0, 0, &fence);
......
@@ -483,28 +483,26 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					uint32_t resource_id, uint64_t offset,
+					struct virtio_gpu_object *bo,
+					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
 					struct virtio_gpu_fence **fence)
 {
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
-	struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
-	struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
-	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       obj->pages->sgl, obj->pages->nents,
+				       bo->pages->sgl, bo->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->r.width = width;
 	cmd_p->r.height = height;
@@ -791,21 +789,19 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					uint32_t resource_id, uint32_t ctx_id,
+					struct virtio_gpu_object *bo,
+					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
 					struct virtio_gpu_fence **fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
-	struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
-	struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
-	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       obj->pages->sgl, obj->pages->nents,
+				       bo->pages->sgl, bo->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -813,7 +809,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->box = *box;
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment