@@ ... @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	flushed = id->flushed_updates;
 	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
-	    (atomic64_read(&id->owner) != vm->entity.fence_context) ||
+	    (id->owner != vm->entity.fence_context) ||
 	    (job->vm_pd_addr != id->pd_gpu_addr) ||
 	    (updates && (!flushed || updates->context != flushed->context ||
 			dma_fence_is_later(updates, flushed))) ||
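This hunk turns the ownership check in amdgpu_vmid_grab_reserved() into a plain load. That is only sound because every reader and writer of id->owner now runs with the per-hub id_mgr->lock mutex held. The companion declaration change lands in amdgpu_ids.h; it is not part of this excerpt, but as a sketch (assumed from the rest of the patch) it would read:

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ ... @@ struct amdgpu_vmid {
-	atomic64_t		owner;
+	u64			owner;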
@@ ... @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 		id->flushed_updates = dma_fence_get(updates);
 	}
 	id->pd_gpu_addr = job->vm_pd_addr;
-	atomic64_set(&id->owner, vm->entity.fence_context);
+	id->owner = vm->entity.fence_context;
 	job->vm_needs_flush = needs_flush;
 	if (needs_flush) {
 		dma_fence_put(id->last_flush);
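Both hunks above sit in amdgpu_vmid_grab_reserved(): the first is the read side of id->owner, this one the write side, and both execute under the same id_mgr->lock, so the plain u64 needs neither atomics nor explicit barriers; the mutex's acquire/release semantics already order the accesses. A minimal userspace analogue of that pattern (illustrative pthreads code, not amdgpu source; all names are hypothetical):

#include <pthread.h>
#include <stdint.h>

/* A plain integer field is race-free as long as every reader and writer
 * takes the same mutex; mirrors id->owner guarded by id_mgr->lock. */
struct vmid {
	uint64_t owner;				/* guarded by mgr_lock */
};

static pthread_mutex_t mgr_lock = PTHREAD_MUTEX_INITIALIZER;

static int vmid_owned_by(struct vmid *id, uint64_t fence_context)
{
	int match;

	pthread_mutex_lock(&mgr_lock);
	match = id->owner == fence_context;	/* plain load under the lock */
	pthread_mutex_unlock(&mgr_lock);
	return match;
}

static void vmid_assign(struct vmid *id, uint64_t fence_context)
{
	pthread_mutex_lock(&mgr_lock);
	id->owner = fence_context;		/* plain store under the lock */
	pthread_mutex_unlock(&mgr_lock);
}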
@@ ... @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		if (amdgpu_vmid_had_gpu_reset(adev, id))
 			continue;
 
-		if (atomic64_read(&id->owner) != vm->entity.fence_context)
+		if (id->owner != vm->entity.fence_context)
 			continue;
 
 		if (job->vm_pd_addr != id->pd_gpu_addr)
@@ ... @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		id->pd_gpu_addr = job->vm_pd_addr;
 		dma_fence_put(id->flushed_updates);
 		id->flushed_updates = dma_fence_get(updates);
-		atomic64_set(&id->owner, vm->entity.fence_context);
+		id->owner = vm->entity.fence_context;
 
 needs_flush:
 		job->vm_needs_flush = true;
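The same conversion applies to the VMID-reuse path in amdgpu_vmid_grab_used(): the owner comparison two hunks up and the store here are only reached from amdgpu_vmid_grab(), which already holds the per-hub mutex, which is why no lock/unlock pair appears in these hunks. A simplified sketch of that call-site invariant (argument lists trimmed; assumed from the driver's structure, not a verbatim quote):

	/* Both grab paths execute under the per-hub ID-manager mutex, so
	 * the plain loads and stores of id->owner above cannot race. */
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
	else
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job);
	mutex_unlock(&id_mgr->lock);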
@@ ... @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vmid *id = &id_mgr->ids[vmid];
 
-	atomic64_set(&id->owner, 0);
+	mutex_lock(&id_mgr->lock);
+	id->owner = 0;
 	id->gds_base = 0;
 	id->gds_size = 0;
 	id->gws_base = 0;
 	id->gws_size = 0;
 	id->oa_base = 0;
 	id->oa_size = 0;
+	mutex_unlock(&id_mgr->lock);
 }
 
 /**
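amdgpu_vmid_reset() is the one writer not called from under the grab path, so once the atomic is gone it must take id_mgr->lock itself; that is the mutex_lock()/mutex_unlock() pair added above. As a side effect, clearing the gds/gws/oa state is now also serialized against concurrent grabs. Continuing the userspace analogue from earlier (illustrative, not amdgpu source):

static void vmid_reset(struct vmid *id)
{
	pthread_mutex_lock(&mgr_lock);
	id->owner = 0;		/* cannot race with vmid_assign()/vmid_owned_by() */
	pthread_mutex_unlock(&mgr_lock);
}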