spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
+ enum amd_sched_priority init_priority;
+ enum amd_sched_priority override_priority;
};
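
The two new fields split a context's priority in two: init_priority records the
priority requested at context creation, while override_priority is an optional
value that takes precedence whenever it is set; AMD_SCHED_PRIORITY_UNSET means
"no override in effect". As a minimal sketch of the resolution rule (the helper
name is hypothetical; the patch open-codes this in
amdgpu_ctx_priority_override() below):

/* Hypothetical helper, not part of this patch: the override wins
 * unless it is still UNSET, in which case the creation-time
 * priority applies. */
static enum amd_sched_priority
amdgpu_ctx_effective_priority(struct amdgpu_ctx *ctx)
{
        return (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;
}
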
struct amdgpu_ctx_mgr {
struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
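
amdgpu_ctx_priority_override() is the new entry point; no caller appears in
this excerpt. As an illustration only (not part of this patch), a caller could
look up the target context by id with the driver's existing amdgpu_ctx_get()
and amdgpu_ctx_put() helpers and apply the override:

/* Illustration only: apply an override to the context identified by
 * ctx_id in the given file private. amdgpu_ctx_get() takes a
 * reference that amdgpu_ctx_put() drops. */
static int example_override_ctx_priority(struct amdgpu_fpriv *fpriv,
                                         uint32_t ctx_id,
                                         enum amd_sched_priority priority)
{
        struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, ctx_id);

        if (!ctx)
                return -EINVAL;

        amdgpu_ctx_priority_override(ctx, priority);
        amdgpu_ctx_put(ctx);
        return 0;
}
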
}
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->init_priority = priority;
+ ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
return fence;
}
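
The override entry point itself: it stores the new override, resolves the
effective priority (the override unless it is UNSET, otherwise the
creation-time priority), and moves each ring's scheduler entity onto the run
queue that matches the resolved priority.
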
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority)
+{
+ int i;
+ struct amdgpu_device *adev = ctx->adev;
+ struct amd_sched_rq *rq;
+ struct amd_sched_entity *entity;
+ struct amdgpu_ring *ring;
+ enum amd_sched_priority ctx_prio;
+
+ ctx->override_priority = priority;
+
+ ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ ring = adev->rings[i];
+ entity = &ctx->rings[i].entity;
+ rq = &ring->sched.sched_rq[ctx_prio];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ continue;
+
+ amd_sched_entity_set_rq(entity, rq);
+ }
+}
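
The KIQ ring is skipped because its entity is owned by the kernel itself
rather than by user contexts, so it should never change priority on a
context's behalf. amd_sched_entity_set_rq() comes from a companion scheduler
change that is not part of this excerpt; as a rough sketch only (the lock and
the rq add/remove helper names below are assumptions, not this patch),
switching an entity between run queues amounts to:

/* Sketch under stated assumptions: the entity must be removed from
 * its old run queue and added to the new one under a lock, since the
 * scheduler may be selecting entities concurrently. */
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
                             struct amd_sched_rq *rq)
{
        if (entity->rq == rq)
                return;

        spin_lock(&entity->rq_lock);
        if (entity->rq)
                amd_sched_rq_remove_entity(entity->rq, entity);
        entity->rq = rq;
        if (rq)
                amd_sched_rq_add_entity(rq, entity);
        spin_unlock(&entity->rq_lock);
}
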
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);