Commit 50c3d193 authored by Gurchetan Singh, committed by Gerd Hoffmann

drm/virtio: implement blob resources: fix stride discrepancy

The old transfer ioctls may also operate on blob resources; for
simplicity, there is no separate TRANSFER_BLOB hypercall for now.

The guest may have an image view of a blob resource whose stride is
not equal to width * bytes_per_pixel.
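
To make the discrepancy concrete, here is a minimal sketch (illustrative
only, not part of the patch) of how a texel address is computed once row
and layer strides are tracked explicitly:

#include <stdint.h>

/* Illustrative helper, not from this patch: byte offset of texel
 * (x, y, z) within a strided image.  For a tightly packed layout
 * stride == width * bytes_per_pixel, but a blob resource may carry
 * row padding, so the stride must travel with the transfer. */
static inline uint64_t texel_offset(uint32_t x, uint32_t y, uint32_t z,
				    uint32_t bytes_per_pixel,
				    uint32_t stride, uint32_t layer_stride)
{
	return (uint64_t)z * layer_stride + (uint64_t)y * stride +
	       (uint64_t)x * bytes_per_pixel;
}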

For host-only blobs, we can repurpose the transfer ioctls to synchronize
caches as well.  For guest-only blobs, these operations are undefined for
now, so they are left unsupported.

Also, with seamless Wayland integration between guest and host looking
increasingly attractive, it makes sense to keep track of a single stride
value.
Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Acked-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200924003214.662-16-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent 1e2554f4
drivers/gpu/drm/virtio/virtgpu_drv.h:

@@ -351,12 +351,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
+					  uint32_t stride,
+					  uint32_t layer_stride,
 					  struct drm_virtgpu_3d_box *box,
 					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
+					uint32_t stride,
+					uint32_t layer_stride,
 					struct drm_virtgpu_3d_box *box,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
drivers/gpu/drm/virtio/virtgpu_ioctl.c:

@@ -312,6 +312,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_from_host *args = data;
+	struct virtio_gpu_object *bo;
 	struct virtio_gpu_object_array *objs;
 	struct virtio_gpu_fence *fence;
 	int ret;
@@ -325,6 +326,17 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (objs == NULL)
 		return -ENOENT;
 
+	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	if (bo->guest_blob && !bo->host3d_blob) {
+		ret = -EINVAL;
+		goto err_put_free;
+	}
+
+	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+		ret = -EINVAL;
+		goto err_put_free;
+	}
+
 	ret = virtio_gpu_array_lock_resv(objs);
 	if (ret != 0)
 		goto err_put_free;
@@ -334,9 +346,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		ret = -ENOMEM;
 		goto err_unlock;
 	}
+
 	virtio_gpu_cmd_transfer_from_host_3d
-		(vgdev, vfpriv->ctx_id, offset, args->level,
-		 &args->box, objs, fence);
+		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
+		 args->layer_stride, &args->box, objs, fence);
 	dma_fence_put(&fence->f);
 	virtio_gpu_notify(vgdev);
 	return 0;
@@ -354,6 +367,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_to_host *args = data;
+	struct virtio_gpu_object *bo;
 	struct virtio_gpu_object_array *objs;
 	struct virtio_gpu_fence *fence;
 	int ret;
@@ -363,6 +377,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	if (objs == NULL)
 		return -ENOENT;
 
+	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	if (bo->guest_blob && !bo->host3d_blob) {
+		ret = -EINVAL;
+		goto err_put_free;
+	}
+
 	if (!vgdev->has_virgl_3d) {
 		virtio_gpu_cmd_transfer_to_host_2d
 			(vgdev, offset,
@@ -370,6 +390,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			 objs, NULL);
 	} else {
 		virtio_gpu_create_context(dev, file);
+
+		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+			ret = -EINVAL;
+			goto err_put_free;
+		}
+
 		ret = virtio_gpu_array_lock_resv(objs);
 		if (ret != 0)
 			goto err_put_free;
@@ -381,8 +407,9 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 
 		virtio_gpu_cmd_transfer_to_host_3d
 			(vgdev,
-			 vfpriv ? vfpriv->ctx_id : 0, offset,
-			 args->level, &args->box, objs, fence);
+			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
+			 args->stride, args->layer_stride, &args->box, objs,
+			 fence);
 		dma_fence_put(&fence->f);
 	}
 	virtio_gpu_notify(vgdev);
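
For context, a minimal userspace sketch of how the repurposed ioctl might
be driven with an explicit stride; the handle and padding values are made
up, error handling is elided, and the field names follow the virtgpu UAPI:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

/* Hypothetical example: flush a 256x256 region of a host3d blob whose
 * rows are padded to 1088 bytes (256 * 4 bytes per pixel + 64). */
static int flush_blob_region(int drm_fd, __u32 bo_handle)
{
	struct drm_virtgpu_3d_transfer_to_host xfer;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = bo_handle;
	xfer.box.w = 256;	/* region to flush, in texels */
	xfer.box.h = 256;
	xfer.box.d = 1;
	/* A non-zero stride is only accepted for host3d blobs; with this
	 * patch the ioctl returns -EINVAL for classic resources. */
	xfer.stride = 256 * 4 + 64;

	return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
}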
drivers/gpu/drm/virtio/virtgpu_vq.c:

@@ -1017,6 +1017,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
+					uint32_t stride,
+					uint32_t layer_stride,
 					struct drm_virtgpu_3d_box *box,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
@@ -1025,12 +1027,14 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-	if (use_dma_api)
+	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 				       shmem->pages->sgl, shmem->pages->nents,
 				       DMA_TO_DEVICE);
+	}
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1043,6 +1047,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	convert_to_hw_box(&cmd_p->box, box);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
+	cmd_p->stride = cpu_to_le32(stride);
+	cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1050,6 +1056,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
+					  uint32_t stride,
+					  uint32_t layer_stride,
 					  struct drm_virtgpu_3d_box *box,
 					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence)
@@ -1069,6 +1077,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 	convert_to_hw_box(&cmd_p->box, box);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
+	cmd_p->stride = cpu_to_le32(stride);
+	cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
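
On the other side of the hypercall, the host walks the box using the two
stride values carried in the command. A rough sketch of that addressing
(not host code from this series, just the shape of the computation):

#include <string.h>

/* Illustrative sketch: copy a w_bytes x h x d box between two buffers
 * that share the same row stride and layer stride. */
static void copy_strided_box(unsigned char *dst, const unsigned char *src,
			     size_t w_bytes, unsigned int h, unsigned int d,
			     size_t stride, size_t layer_stride)
{
	unsigned int y, z;

	for (z = 0; z < d; z++) {
		for (y = 0; y < h; y++) {
			size_t off = z * layer_stride + y * stride;

			memcpy(dst + off, src + off, w_bytes);
		}
	}
}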