drm/amdgpu: add plumbing for ctx priority changes v2
author Andres Rodriguez <andresx7@gmail.com>
Wed, 7 Jun 2017 00:20:38 +0000 (20:20 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 9 Oct 2017 20:30:24 +0000 (16:30 -0400)
Introduce amdgpu_ctx_priority_override(), a mechanism to override a
context's priority.

An override can be terminated by setting the override to
AMD_SCHED_PRIORITY_UNSET.
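
A caller can therefore boost a context and later drop the boost, as in
this minimal sketch (AMD_SCHED_PRIORITY_HIGH_HW is assumed here as an
example level from the amd_sched_priority enum; any valid priority
works the same way):

    /* Raise the context above the priority it was created with. */
    amdgpu_ctx_priority_override(ctx, AMD_SCHED_PRIORITY_HIGH_HW);

    /* ... submissions from ctx now run from the overridden run queue ... */

    /* Terminate the override; ctx falls back to its init_priority. */
    amdgpu_ctx_priority_override(ctx, AMD_SCHED_PRIORITY_UNSET);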

v2: switch from a refcounted interface to a direct set

Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 715ce4863bc8276e008f412582cb87e0bbe3c597..951c8db01412a228bf6444ce0a2aeb1c6b0080bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -735,7 +735,9 @@ struct amdgpu_ctx {
        spinlock_t              ring_lock;
        struct dma_fence        **fences;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
-       bool preamble_presented;
+       bool                    preamble_presented;
+       enum amd_sched_priority init_priority;
+       enum amd_sched_priority override_priority;
 };
 
 struct amdgpu_ctx_mgr {
@@ -752,6 +754,8 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct dma_fence *fence, uint64_t *seq);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+                                 enum amd_sched_priority priority);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 29eebdc30a4c47f142947b601a43f27cea396fd4..d2ef24f4b56d7b7c960fbf7036ba37bc18c6bfb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -72,6 +72,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
        }
 
        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+       ctx->init_priority = priority;
+       ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
 
        /* create context entity for each ring */
        for (i = 0; i < adev->num_rings; i++) {
@@ -362,6 +364,33 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
        return fence;
 }
 
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+                                 enum amd_sched_priority priority)
+{
+       int i;
+       struct amdgpu_device *adev = ctx->adev;
+       struct amd_sched_rq *rq;
+       struct amd_sched_entity *entity;
+       struct amdgpu_ring *ring;
+       enum amd_sched_priority ctx_prio;
+
+       ctx->override_priority = priority;
+
+       ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+                       ctx->init_priority : ctx->override_priority;
+
+       for (i = 0; i < adev->num_rings; i++) {
+               ring = adev->rings[i];
+               entity = &ctx->rings[i].entity;
+               rq = &ring->sched.sched_rq[ctx_prio];
+
+               if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+                       continue;
+
+               amd_sched_entity_set_rq(entity, rq);
+       }
+}
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
        mutex_init(&mgr->lock);