return 0;
}
-static void free_workload(struct intel_vgpu_workload *workload)
-{
- intel_gvt_mm_unreference(workload->shadow_mm);
- kmem_cache_free(workload->vgpu->submission.workloads, workload);
-}
-
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
out:
intel_vgpu_unpin_mm(workload->shadow_mm);
- free_workload(workload);
+ intel_vgpu_destroy_workload(workload);
return ret;
}
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
- workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
- if (!workload)
- return -ENOMEM;
-
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
- INIT_LIST_HEAD(&workload->list);
- INIT_LIST_HEAD(&workload->shadow_bb);
-
- init_waitqueue_head(&workload->shadow_ctx_status_wq);
- atomic_set(&workload->shadow_ctx_active, 0);
+ workload = intel_vgpu_create_workload(vgpu);
+ if (IS_ERR(workload))
+ return PTR_ERR(workload);
- workload->vgpu = vgpu;
workload->ring_id = ring_id;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
- workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
- workload->shadowed = false;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
if (ret == 0)
queue_workload(workload);
else {
- free_workload(workload);
+ intel_vgpu_destroy_workload(workload);
if (vgpu_is_vm_unhealthy(ret)) {
intel_vgpu_clean_execlist(vgpu);
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
- free_workload(pos);
+ intel_vgpu_destroy_workload(pos);
}
clear_bit(engine->id, s->shadow_ctx_desc_updated);
}
i915_gem_context_put(s->shadow_ctx);
return ret;
}
+
+/**
+ * intel_vgpu_destroy_workload - destroy a vGPU workload
+ * @workload: the workload to destroy
+ *
+ * This function is called when destroying a vGPU workload.
+ *
+ */
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+
+ if (workload->shadow_mm)
+ intel_gvt_mm_unreference(workload->shadow_mm);
+
+ kmem_cache_free(s->workloads, workload);
+}
+
+/**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+ *
+ * This function is called when creating a vGPU workload.
+ *
+ * Returns:
+ * struct intel_vgpu_workload * on success, or an ERR_PTR() encoding a
+ * negative error code on failure.
+ *
+ */
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_workload *workload;
+
+ workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
+ if (!workload)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
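+	/* -EINPROGRESS: created but not yet completed by the scheduler */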
+ workload->status = -EINPROGRESS;
+ workload->shadowed = false;
+ workload->vgpu = vgpu;
+
+ return workload;
+}
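
Usage note (not part of the patch): a minimal sketch of the new create/queue/destroy
pairing, mirroring the execlist submission path in the hunks above. The helper name
submit_one_workload is hypothetical, and intel_gvt_scan_and_shadow_workload is assumed
to be the scan/shadow step that produces ret; the real code also handles failsafe and
cleanup on vGPU errors, which is omitted here.

	static int submit_one_workload(struct intel_vgpu *vgpu, int ring_id,
				       struct execlist_ctx_descriptor_format *desc)
	{
		struct intel_vgpu_workload *workload;
		int ret;

		/* allocate and initialize a zeroed workload from the vGPU's cache */
		workload = intel_vgpu_create_workload(vgpu);
		if (IS_ERR(workload))
			return PTR_ERR(workload);

		/* fill in per-submission state before scan/shadow */
		workload->ring_id = ring_id;
		workload->ctx_desc = *desc;
		workload->prepare = prepare_execlist_workload;
		workload->complete = complete_execlist_workload;

		ret = intel_gvt_scan_and_shadow_workload(workload);
		if (ret == 0)
			queue_workload(workload);
		else
			intel_vgpu_destroy_workload(workload);

		return ret;
	}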