void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint64_t offset,
+ struct virtio_gpu_object *bo,
+ uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
struct virtio_gpu_fence **fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ struct virtio_gpu_object *bo,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
struct virtio_gpu_fence **fence);
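Both prototypes now take the buffer object itself instead of a raw resource id: the callee can derive the id from bo->hw_res_handle and, more importantly, reach the object's backing pages for the DMA sync done in the implementations further down. A minimal userspace sketch of this object-instead-of-handle pattern, with hypothetical names rather than the driver's:

#include <stdint.h>
#include <stdio.h>

struct gpu_object {
	uint32_t hw_res_handle;	/* device-visible resource id */
	size_t nents;		/* stand-in for the sg table to sync */
};

/* old shape: only the id crosses the call boundary */
static void transfer_by_id(uint32_t resource_id)
{
	printf("transfer %u (backing pages unreachable here)\n", resource_id);
}

/* new shape: the object crosses, so its pages can be synced too */
static void transfer_by_object(const struct gpu_object *bo)
{
	printf("sync %zu entries, transfer %u\n", bo->nents, bo->hw_res_handle);
}

int main(void)
{
	struct gpu_object bo = { .hw_res_handle = 42, .nents = 4 };

	transfer_by_id(bo.hw_res_handle);	/* pre-patch call style */
	transfer_by_object(&bo);		/* post-patch call style */
	return 0;
}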
convert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, qobj->hw_res_handle, offset,
+ (vgdev, qobj, offset,
box.w, box.h, box.x, box.y, NULL);
} else {
virtio_gpu_cmd_transfer_to_host_3d
- (vgdev, qobj->hw_res_handle,
+ (vgdev, qobj,
vfpriv ? vfpriv->ctx_id : 0, offset,
args->level, &box, &fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
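The ioctl path above shows the calling convention: without virgl 3D support the 2D transfer runs unfenced (NULL), while the 3D transfer takes a fence that is then attached as the exclusive fence on the bo's reservation object, so later users of the buffer wait for the upload. A compact sketch of that capability dispatch, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct dev_caps { bool has_virgl_3d; };

static void transfer(const struct dev_caps *caps)
{
	if (!caps->has_virgl_3d)
		puts("2D transfer, no fence needed");	/* scanout-only device */
	else
		puts("3D transfer, fenced and attached to the bo");
}

int main(void)
{
	struct dev_caps caps = { .has_virgl_3d = true };

	transfer(&caps);
	return 0;
}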
handle = bo->hw_res_handle;
if (bo->dumb) {
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, handle, 0,
+ (vgdev, bo, 0,
cpu_to_le32(plane->state->src_w >> 16),
cpu_to_le32(plane->state->src_h >> 16),
cpu_to_le32(plane->state->src_x >> 16),
cpu_to_le32(plane->state->src_y >> 16), NULL);
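The >> 16 above strips the fractional part of DRM's 16.16 fixed-point plane source coordinates before they are placed in the command. A short demonstration of that encoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src_w = (1920u << 16) | 0x8000;	/* 1920.5 in 16.16 fixed point */

	printf("integer part: %u\n", src_w >> 16);	/* prints 1920 */
	return 0;
}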
if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
/* new cursor -- update & wait */
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, handle, 0,
+ (vgdev, bo, 0,
cpu_to_le32(plane->state->crtc_w),
cpu_to_le32(plane->state->crtc_h),
0, 0, &fence);
}
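For the cursor the driver passes &fence and waits for it, so the new image is fully uploaded before it is displayed ("update & wait"). A minimal userspace analog of such a signal/wait fence, using pthreads and hypothetical names rather than the kernel's dma-fence API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fence {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool signaled;
};

static void *device_work(void *arg)
{
	struct fence *f = arg;

	puts("device: cursor upload done");
	pthread_mutex_lock(&f->lock);
	f->signaled = true;			/* signal the fence */
	pthread_cond_broadcast(&f->cond);
	pthread_mutex_unlock(&f->lock);
	return NULL;
}

int main(void)
{
	struct fence f = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false };
	pthread_t dev;

	pthread_create(&dev, NULL, device_work, &f);	/* kick off the "upload" */
	pthread_mutex_lock(&f.lock);
	while (!f.signaled)				/* update & wait */
		pthread_cond_wait(&f.cond, &f.lock);
	pthread_mutex_unlock(&f.lock);
	pthread_join(dev, NULL);
	puts("driver: safe to show the new cursor");
	return 0;
}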
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint64_t offset,
+ struct virtio_gpu_object *bo,
+ uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
struct virtio_gpu_fence **fence)
{
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
- struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->pages->nents,
+ bo->pages->sgl, bo->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->r.width = width;
cmd_p->r.height = height;
cmd_p->r.x = x;
cmd_p->r.y = y;
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
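The deleted lines above are the heart of the fix: the old code always looked up the fbdev framebuffer's object and synced its pages, even when a different buffer was being transferred. With the bo parameter, the pages that get flushed are the ones the device will actually read. A small analog of the corrected flow, names illustrative:

#include <stdbool.h>
#include <stdio.h>

struct bo { const char *name; size_t nents; };

static bool use_dma_api = true;	/* analog of !virtio_has_iommu_quirk() */

static void transfer_to_host(const struct bo *bo)
{
	if (use_dma_api)	/* flush the pages of the transfer target itself */
		printf("sync %zu entries of %s\n", bo->nents, bo->name);
	printf("queue transfer of %s\n", bo->name);
}

int main(void)
{
	struct bo user_bo = { "user-bo", 4 };

	/* pre-patch code would have synced the fbdev framebuffer here instead */
	transfer_to_host(&user_bo);
	return 0;
}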
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ struct virtio_gpu_object *bo,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
struct virtio_gpu_fence **fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
- struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->pages->nents,
+ bo->pages->sgl, bo->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->box = *box;
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
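Every multi-byte command field is converted with cpu_to_le32()/cpu_to_le64() because virtio-gpu command structs are little-endian on the wire regardless of host byte order. A userspace approximation of that conversion (my_cpu_to_le32 is a hypothetical stand-in built on a gcc/clang builtin):

#include <stdint.h>
#include <stdio.h>

static uint32_t my_cpu_to_le32(uint32_t v)
{
	const union { uint16_t half; uint8_t byte; } probe = { .half = 1 };

	if (probe.byte)				/* little-endian host: already wire order */
		return v;
	return __builtin_bswap32(v);		/* big-endian host: byte-swap */
}

int main(void)
{
	printf("0x%08x\n", my_cpu_to_le32(0x12345678u));
	return 0;
}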