drm/virtio: pass virtio_gpu_object to virtio_gpu_cmd_transfer_to_host_{2d,3d}

Pass virtio_gpu_object down to the virtio_gpu_cmd_transfer_to_host_2d and
virtio_gpu_cmd_transfer_to_host_3d functions, instead of passing just
the virtio resource handle.

This is needed to look up the object's scatter list for DMA sync.
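
With this change the transfer helpers take the buffer object directly. A
sketch of the resulting prototypes, assembled from the hunks below (the
corresponding header hunk, presumably in virtgpu_drv.h, is among the five
changed files but is not shown on this page):

  void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                          struct virtio_gpu_object *bo,
                                          uint64_t offset,
                                          __le32 width, __le32 height,
                                          __le32 x, __le32 y,
                                          struct virtio_gpu_fence **fence);

  void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                          struct virtio_gpu_object *bo,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence);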

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Jiandi An <jiandi.an@amd.com>
Tested-by: Jiandi An <jiandi.an@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180920062924.6514-1-kraxel@redhat.com
Author: Gerd Hoffmann
Date:   2018-09-20 08:29:23 +02:00
Parent: c3a8d6ea73
Commit: af334c5d41

5 changed files with 17 additions and 19 deletions

drivers/gpu/drm/virtio/virtgpu_vq.c

@@ -483,28 +483,26 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					uint32_t resource_id, uint64_t offset,
+					struct virtio_gpu_object *bo,
+					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
 					struct virtio_gpu_fence **fence)
 {
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
-	struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
-	struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
-	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       obj->pages->sgl, obj->pages->nents,
+				       bo->pages->sgl, bo->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->r.width = width;
 	cmd_p->r.height = height;
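
For callers the conversion is mechanical: resolve the virtio_gpu_object once
and pass it in, instead of digging the resource handle out at each call site.
A minimal before/after sketch of a framebuffer-style caller (variable names
are illustrative, not the exact hunks from the other changed files):

  	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(fb->base.obj[0]);

  	/* before: only the bare resource handle crossed the call */
  	virtio_gpu_cmd_transfer_to_host_2d(vgdev, bo->hw_res_handle, 0,
  					   width, height, x, y, NULL);

  	/* after: the object itself crosses the call; the handle and the
  	 * scatter list are both taken from bo inside the helper */
  	virtio_gpu_cmd_transfer_to_host_2d(vgdev, bo, 0,
  					   width, height, x, y, NULL);
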
@@ -791,21 +789,19 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					uint32_t resource_id, uint32_t ctx_id,
+					struct virtio_gpu_object *bo,
+					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
 					struct virtio_gpu_fence **fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
-	struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
-	struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
-	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       obj->pages->sgl, obj->pages->nents,
+				       bo->pages->sgl, bo->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -813,7 +809,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->box = *box;
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
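
The 3d variant is reached from the transfer-to-host ioctl; a sketch of the
updated call, modeled on the virtgpu_ioctl.c call site (names such as qobj,
vfpriv and args are assumptions, not the exact hunk):

  	virtio_gpu_cmd_transfer_to_host_3d(vgdev, qobj,
  					   vfpriv ? vfpriv->ctx_id : 0,
  					   args->offset, args->level,
  					   (struct virtio_gpu_box *)&args->box,
  					   &fence);

Note the behavioral point hidden in the refactor: the old code fetched the
object to sync from the fbdev framebuffer (vgdev->vgfbdev), so the
dma_sync_sg_for_device() could run on a different buffer than the one being
transferred; with the object passed in, the sync always covers the pages
that actually go to the host.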