*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
+ vmw_resource_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false);
}
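For reference, vmw_swap_notify() (and the move_notify path changed later in this patch) is only reached through TTM's driver callbacks. A rough sketch of the hookup in the driver's ttm_bo_driver instance (the real initializer in vmwgfx_buffer.c fills in many more callbacks; only the two notify hooks relevant here are shown):

	struct ttm_bo_driver vmw_bo_driver = {
		/* ... other callbacks elided ... */
		.move_notify = vmw_move_notify,
		.swap_notify = vmw_swap_notify,
	};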
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
+
+/**
+ * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_dma_buffer_map_and_cache().
+ */
+void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
+{
+ if (vbo->map.bo == NULL)
+ return;
+
+ ttm_bo_kunmap(&vbo->map);
+}
+
+/**
+ * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on any of:
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
+{
+ struct ttm_buffer_object *bo = &vbo->base;
+ bool not_used;
+ void *virtual;
+ int ret;
+
+ virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ if (virtual)
+ return virtual;
+
+ ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ if (ret)
+ DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+ return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
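A minimal sketch of a hypothetical caller (example_fill() is not part of the patch), assuming the buffer object can be reserved; it shows the returned address being used only under the reservation, with no explicit unmap afterwards:

	static int example_fill(struct vmw_dma_buffer *vbo, u8 value)
	{
		void *virtual;
		int ret;

		ret = ttm_bo_reserve(&vbo->base, false, false, NULL);
		if (ret)
			return ret;

		virtual = vmw_dma_buffer_map_and_cache(vbo);
		if (virtual)
			memset(virtual, value, vbo->base.num_pages << PAGE_SHIFT);

		ttm_bo_unreserve(&vbo->base);

		/*
		 * No vmw_dma_buffer_unmap() needed here; the cached map is
		 * torn down on move, swapout or destruction of the object.
		 */
		return virtual ? 0 : -ENOMEM;
	}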
s32 pin_count;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
+ /* Protected by reservation */
+ struct ttm_bo_kmap_obj map;
};
/**
struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
+extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
+extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
+extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
- struct ttm_bo_kmap_obj map;
- void *bo_ptr;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
unsigned long irq_flags;
- s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+ s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
u32 cpp, max_x, max_y;
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
+ struct vmw_dma_buffer *vbo = par->vmw_bo;
+ void *virtual;
if (vmw_priv->suspended)
return;
if (!cur_fb)
goto out_unlock;
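+ /*
+ * Take the reservation so that the kernel map cached by
+ * vmw_dma_buffer_map_and_cache() below stays valid for the copy.
+ */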
+ (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
+ (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+ virtual = vmw_dma_buffer_map_and_cache(vbo);
+ if (!virtual)
+ goto out_unreserve;
+
spin_lock_irqsave(&par->dirty.lock, irq_flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
- goto out_unlock;
+ goto out_unreserve;
}
/*
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
if (w && h) {
- dst_ptr = (u8 *)par->bo_ptr +
+ dst_ptr = (u8 *)virtual +
(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
src_ptr = (u8 *)par->vmalloc +
((dst_y1 + par->fb_y) * info->fix.line_length +
clip.x2 = dst_x2;
clip.y1 = dst_y1;
clip.y2 = dst_y2;
+ }
+out_unreserve:
+ ttm_bo_unreserve(&vbo->base);
+ ttm_read_unlock(&vmw_priv->reservation_sem);
+ if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
vmw_fifo_flush(vmw_priv, false);
par->set_fb = NULL;
}
- if (par->vmw_bo && detach_bo) {
- struct vmw_private *vmw_priv = par->vmw_priv;
-
- if (par->bo_ptr) {
- ttm_bo_kunmap(&par->map);
- par->bo_ptr = NULL;
- }
- if (unref_bo)
- vmw_dmabuf_unreference(&par->vmw_bo);
- else if (vmw_priv->active_display_unit != vmw_du_legacy)
- vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
- }
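+ /* The cached map is torn down by the move, swapout and destroy notifiers. */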
+ if (par->vmw_bo && detach_bo && unref_bo)
+ vmw_dmabuf_unreference(&par->vmw_bo);
return 0;
}
if (ret)
goto out_unlock;
- if (!par->bo_ptr) {
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
-
- /*
- * Pin before mapping. Since we don't know in what placement
- * to pin, call into KMS to do it for us. LDU doesn't require
- * additional pinning because set_config() would've pinned
- * it already
- */
- if (vmw_priv->active_display_unit != vmw_du_legacy) {
- ret = vfb->pin(vfb);
- if (ret) {
- DRM_ERROR("Could not pin the fbdev "
- "framebuffer.\n");
- goto out_unlock;
- }
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
- par->vmw_bo->base.num_pages, &par->map);
- if (ret) {
- if (vmw_priv->active_display_unit != vmw_du_legacy)
- vfb->unpin(vfb);
-
- DRM_ERROR("Could not map the fbdev framebuffer.\n");
- goto out_unlock;
- }
-
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- }
-
-
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
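+ /* Tear down any cached kernel map before freeing the object. */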
+ vmw_dma_buffer_unmap(vmw_bo);
kfree(vmw_bo);
}
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+ vmw_dma_buffer_unmap(&vmw_user_bo->dma);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+ /*
+ * Kill any cached kernel maps before move. An optimization could
+ * be to do this iff source or destination memory type is VRAM.
+ */
+ vmw_dma_buffer_unmap(dma_buf);
+
if (mem->mem_type != VMW_PL_MOB) {
struct vmw_resource *res, *n;
struct ttm_validate_buffer val_buf;
}
+/**
+ * vmw_resource_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
+{
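+ /*
+ * Only buffer objects created through the vmwgfx dmabuf paths are
+ * embedded in a struct vmw_dma_buffer and can carry a cached map.
+ */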
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ /* Kill any cached kernel maps before swapout */
+ vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+}
+
/**
* vmw_query_readback_all - Read back cached query states