sched/fair: Remove #ifdefs from scale_rt_capacity()
authorVincent Guittot <vincent.guittot@linaro.org>
Thu, 19 Jul 2018 12:00:06 +0000 (14:00 +0200)
committerIngo Molnar <mingo@kernel.org>
Wed, 25 Jul 2018 09:41:05 +0000 (11:41 +0200)
Reuse cpu_util_irq() that has been defined for schedutil and set irq util
to 0 when !CONFIG_IRQ_TIME_ACCOUNTING.

However, the compiler is not able to optimize the sequence (at least with
aarch64 GCC 7.2.1):

free *= (max - irq);
free /= max;

when irq is fixed to 0

Add a new inline function scale_irq_capacity() that will scale utilization
when irq is accounted. Reuse this function in schedutil, which applies a
similar formula.

Suggested-by: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: rjw@rjwysocki.net
Link: http://lkml.kernel.org/r/1532001606-6689-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/sched.h

index c3cf7d992159119893a7b75e1ba210dea06229b2..fc177c06e490dbe8f708c476f113bfa948e13eea 100644 (file)
@@ -177,7 +177,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
        rq->clock_task += delta;
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef HAVE_SCHED_AVG_IRQ
        if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
                update_irq_load_avg(rq, irq_delta + steal);
 #endif
index 97dcd4472a0e94421032d58b48d231c20bd7213d..3fffad3bc8a86a366b93d5480c8840f34a84121e 100644 (file)
@@ -247,8 +247,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
         *   U' = irq + ------- * U
         *                max
         */
-       util *= (max - irq);
-       util /= max;
+       util = scale_irq_capacity(util, irq, max);
        util += irq;
 
        /*
index d5f7d521e4488b12b13bac142b3e8ceabcedfd7a..14c3fddf822a9ea3a752d9275a560a6a85e75e02 100644 (file)
@@ -7551,16 +7551,12 @@ static unsigned long scale_rt_capacity(int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
        unsigned long used, free;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        unsigned long irq;
-#endif
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       irq = READ_ONCE(rq->avg_irq.util_avg);
+       irq = cpu_util_irq(rq);
 
        if (unlikely(irq >= max))
                return 1;
-#endif
 
        used = READ_ONCE(rq->avg_rt.util_avg);
        used += READ_ONCE(rq->avg_dl.util_avg);
@@ -7569,11 +7565,8 @@ static unsigned long scale_rt_capacity(int cpu)
                return 1;
 
        free = max - used;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       free *= (max - irq);
-       free /= max;
-#endif
-       return free;
+
+       return scale_irq_capacity(free, irq, max);
 }
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
index ebb4b3c3ece727ccbcbd016ccfc95df146c2e0dc..614170d9b1aa4849780fbba23e3c27229c9ac0c5 100644 (file)
@@ -856,6 +856,7 @@ struct rq {
        struct sched_avg        avg_rt;
        struct sched_avg        avg_dl;
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#define HAVE_SCHED_AVG_IRQ
        struct sched_avg        avg_irq;
 #endif
        u64                     idle_stamp;
@@ -2210,17 +2211,32 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 {
        return READ_ONCE(rq->avg_rt.util_avg);
 }
+#endif
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef HAVE_SCHED_AVG_IRQ
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
        return rq->avg_irq.util_avg;
 }
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+       util *= (max - irq);
+       util /= max;
+
+       return util;
+
+}
 #else
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
        return 0;
 }
 
-#endif
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+       return util;
+}
 #endif