The CSA (Context Save Area) is the buffer the GPU uses to save context state on preemption. It was previously allocated, mapped, and reported to userspace only under SR-IOV; extend each of those checks to also cover amdgpu_mcbp, so the CSA is set up whenever mid command buffer preemption (MCBP) is enabled (the gating pattern is sketched in the note after the tags below).
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
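---
Editor's note, not part of the patch: a condensed, standalone C sketch of the gating pattern the hunks below repeat at every CSA-related call site. needs_csa(), is_sriov_vf, and the prints are illustrative stand-ins; only the amdgpu_mcbp || amdgpu_sriov_vf(adev) condition comes from the actual change, and amdgpu_mcbp is assumed to be the module-parameter-backed global the diff references.

#include <stdbool.h>
#include <stdio.h>

static int amdgpu_mcbp;		/* stand-in for the amdgpu_mcbp global */
static bool is_sriov_vf;	/* stand-in for amdgpu_sriov_vf(adev)  */

/* CSA is needed whenever preemption is possible: bare-metal MCBP or SR-IOV. */
static bool needs_csa(void)
{
	return amdgpu_mcbp || is_sriov_vf;
}

int main(void)
{
	amdgpu_mcbp = 1;	/* e.g. MCBP enabled on bare metal */
	is_sriov_vf = false;

	if (needs_csa())
		printf("allocate and map the static CSA\n");
	else
		printf("skip CSA setup\n");

	return 0;
}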
if (r)
return r;
- if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
struct dma_fence *f;
bo_va = fpriv->csa_va;
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;
- if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
+ if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
+ (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
ce_preempt++;
adev->ip_blocks[i].status.hw = true;
/* right after GMC hw init, we create CSA */
- if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_CSA_SIZE);
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- skip_preamble &&
- !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
- !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
+ skip_preamble &&
+ !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
+ !amdgpu_mcbp &&
+ !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
amdgpu_ring_emit_ib(ring, job, ib, status);
dev_info.ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
goto error_vm;
}
- if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
- if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
/* TODO: how to handle reserve failure */
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
amdgpu_vm_bo_rmv(adev, fpriv->csa_va);