sched/idle: Merge kernel/sched/idle.c and kernel/sched/idle_task.c
authorIngo Molnar <mingo@kernel.org>
Sat, 3 Mar 2018 14:44:39 +0000 (15:44 +0100)
committerIngo Molnar <mingo@kernel.org>
Sun, 4 Mar 2018 11:39:33 +0000 (12:39 +0100)
Merge these two small .c modules as they implement two aspects
of idle task handling.

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/Makefile
kernel/sched/idle.c
kernel/sched/idle_task.c [deleted file]

index e2f9d4feff4015dd747a3dea06a345076b7709fd..d9a02b31810896b3201ba5bcee426ce2e1ac34a1 100644 (file)
@@ -17,8 +17,9 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
-obj-y += idle_task.o fair.o rt.o deadline.o
-obj-y += wait.o wait_bit.o swait.o completion.o idle.o
+obj-y += idle.o fair.o rt.o deadline.o
+obj-y += wait.o wait_bit.o swait.o completion.o
+
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
index 2760e0357271e83b3b52ad91c08b5b46a1e3adbe..2975f195e1c40427acc33f938a3c9c0174ab5db2 100644 (file)
@@ -1,5 +1,9 @@
 /*
- * Generic entry points for the idle threads
+ * Generic entry points for the idle threads and
+ * implementation of the idle task scheduling class.
+ *
+ * (NOTE: these are not related to SCHED_IDLE batch-scheduled
+ *        tasks which are handled in sched/fair.c)
  */
 #include "sched.h"
 
@@ -33,6 +37,7 @@ void cpu_idle_poll_ctrl(bool enable)
 static int __init cpu_idle_poll_setup(char *__unused)
 {
        cpu_idle_force_poll = 1;
+
        return 1;
 }
 __setup("nohlt", cpu_idle_poll_setup);
@@ -40,6 +45,7 @@ __setup("nohlt", cpu_idle_poll_setup);
 static int __init cpu_idle_nopoll_setup(char *__unused)
 {
        cpu_idle_force_poll = 0;
+
        return 1;
 }
 __setup("hlt", cpu_idle_nopoll_setup);
@@ -51,12 +57,14 @@ static noinline int __cpuidle cpu_idle_poll(void)
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        stop_critical_timings();
+
        while (!tif_need_resched() &&
                (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();
        start_critical_timings();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
+
        return 1;
 }
 
@@ -337,3 +345,116 @@ void cpu_startup_entry(enum cpuhp_state state)
        while (1)
                do_idle();
 }
+
+/*
+ * idle-task scheduling class.
+ */
+
+#ifdef CONFIG_SMP
+static int
+select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
+{
+       return task_cpu(p); /* IDLE tasks are never migrated */
+}
+#endif
+
+/*
+ * Idle tasks are unconditionally rescheduled:
+ */
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
+{
+       resched_curr(rq);
+}
+
+static struct task_struct *
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+       put_prev_task(rq, prev);
+       update_idle_core(rq);
+       schedstat_inc(rq->sched_goidle);
+
+       return rq->idle;
+}
+
+/*
+ * It is not legal to sleep in the idle task - print a warning
+ * message if some code attempts to do it:
+ */
+static void
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
+{
+       raw_spin_unlock_irq(&rq->lock);
+       printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+       dump_stack();
+       raw_spin_lock_irq(&rq->lock);
+}
+
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+/*
+ * scheduler tick hitting a task of our scheduling class.
+ *
+ * NOTE: This function can be called remotely by the tick offload that
+ * goes along full dynticks. Therefore no local assumption can be made
+ * and everything must be accessed through the @rq and @curr passed in
+ * parameters.
+ */
+static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
+{
+}
+
+static void set_curr_task_idle(struct rq *rq)
+{
+}
+
+static void switched_to_idle(struct rq *rq, struct task_struct *p)
+{
+       BUG();
+}
+
+static void
+prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
+{
+       BUG();
+}
+
+static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
+{
+       return 0;
+}
+
+static void update_curr_idle(struct rq *rq)
+{
+}
+
+/*
+ * Simple, special scheduling class for the per-CPU idle tasks:
+ */
+const struct sched_class idle_sched_class = {
+       /* .next is NULL */
+       /* no enqueue/yield_task for idle tasks */
+
+       /* dequeue is not valid, we print a debug message there: */
+       .dequeue_task           = dequeue_task_idle,
+
+       .check_preempt_curr     = check_preempt_curr_idle,
+
+       .pick_next_task         = pick_next_task_idle,
+       .put_prev_task          = put_prev_task_idle,
+
+#ifdef CONFIG_SMP
+       .select_task_rq         = select_task_rq_idle,
+       .set_cpus_allowed       = set_cpus_allowed_common,
+#endif
+
+       .set_curr_task          = set_curr_task_idle,
+       .task_tick              = task_tick_idle,
+
+       .get_rr_interval        = get_rr_interval_idle,
+
+       .prio_changed           = prio_changed_idle,
+       .switched_to            = switched_to_idle,
+       .update_curr            = update_curr_idle,
+};
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
deleted file mode 100644 (file)
index 488222a..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * idle-task scheduling class.
- *
- * (NOTE: these are not related to SCHED_IDLE batch scheduling tasks which are
- *  handled in sched/fair.c)
- */
-#include "sched.h"
-
-#ifdef CONFIG_SMP
-static int
-select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
-{
-       return task_cpu(p); /* IDLE tasks as never migrated */
-}
-#endif
-
-/*
- * Idle tasks are unconditionally rescheduled:
- */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
-{
-       resched_curr(rq);
-}
-
-static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-{
-       put_prev_task(rq, prev);
-       update_idle_core(rq);
-       schedstat_inc(rq->sched_goidle);
-
-       return rq->idle;
-}
-
-/*
- * It is not legal to sleep in the idle task - print a warning
- * message if some code attempts to do it:
- */
-static void
-dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
-{
-       raw_spin_unlock_irq(&rq->lock);
-       printk(KERN_ERR "bad: scheduling from the idle thread!\n");
-       dump_stack();
-       raw_spin_lock_irq(&rq->lock);
-}
-
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-{
-}
-
-/*
- * scheduler tick hitting a task of our scheduling class.
- *
- * NOTE: This function can be called remotely by the tick offload that
- * goes along full dynticks. Therefore no local assumption can be made
- * and everything must be accessed through the @rq and @curr passed in
- * parameters.
- */
-static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
-{
-}
-
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
-static void switched_to_idle(struct rq *rq, struct task_struct *p)
-{
-       BUG();
-}
-
-static void
-prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
-{
-       BUG();
-}
-
-static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
-{
-       return 0;
-}
-
-static void update_curr_idle(struct rq *rq)
-{
-}
-
-/*
- * Simple, special scheduling class for the per-CPU idle tasks:
- */
-const struct sched_class idle_sched_class = {
-       /* .next is NULL */
-       /* no enqueue/yield_task for idle tasks */
-
-       /* dequeue is not valid, we print a debug message there: */
-       .dequeue_task           = dequeue_task_idle,
-
-       .check_preempt_curr     = check_preempt_curr_idle,
-
-       .pick_next_task         = pick_next_task_idle,
-       .put_prev_task          = put_prev_task_idle,
-
-#ifdef CONFIG_SMP
-       .select_task_rq         = select_task_rq_idle,
-       .set_cpus_allowed       = set_cpus_allowed_common,
-#endif
-
-       .set_curr_task          = set_curr_task_idle,
-       .task_tick              = task_tick_idle,
-
-       .get_rr_interval        = get_rr_interval_idle,
-
-       .prio_changed           = prio_changed_idle,
-       .switched_to            = switched_to_idle,
-       .update_curr            = update_curr_idle,
-};