clk: Unprepare the unused prepared slow clocks at late init
author Ulf Hansson <ulf.hansson@linaro.org>
Tue, 12 Mar 2013 19:26:03 +0000 (20:26 +0100)
committer Mike Turquette <mturquette@linaro.org>
Tue, 19 Mar 2013 19:58:42 +0000 (12:58 -0700)
The unused ungated fast clocks are already being disabled by
clk_disable_unused at late init. This patch extends that sequence
so that the unused prepared slow clocks are unprepared as well.

Unless the optional .is_prepared callback is implemented by a
clk_hw, the clk_disable_unused sequence will not unprepare any
unused clocks, since it falls back to using the software prepare
counter.
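
For reference, a driver opts in by reporting its hardware prepare
state through .is_prepared. A minimal sketch, assuming a hypothetical
foo gate driver with a made-up register layout (not part of this
patch), could look like:

	#include <linux/clk-provider.h>
	#include <linux/io.h>

	/* Hypothetical register layout, for illustration only. */
	#define FOO_PREP_REG	0x04
	#define FOO_PREP_EN	(1 << 0)

	struct clk_foo {
		struct clk_hw hw;
		void __iomem *base;
	};

	#define to_clk_foo(_hw) container_of(_hw, struct clk_foo, hw)

	static int clk_foo_is_prepared(struct clk_hw *hw)
	{
		struct clk_foo *foo = to_clk_foo(hw);

		/* Report the prepare state read back from hardware. */
		return !!(readl(foo->base + FOO_PREP_REG) & FOO_PREP_EN);
	}

	static const struct clk_ops clk_foo_ops = {
		.is_prepared	= clk_foo_is_prepared,
	};

With such a callback in place, __clk_is_prepared() reflects the real
hardware state and the new unprepare pass below can take effect for
that clock.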

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Mike Turquette <mturquette@linaro.org>
[mturquette@linaro.org: fixed hlist accessors per b67bfe0d]

drivers/clk/clk.c

index 7571b5054f3cebce0375d1451701e96dea551d75..c0141f3e110941c1871502ebeeb6f81a255531fe 100644 (file)
@@ -335,6 +335,28 @@ late_initcall(clk_debug_init);
 static inline int clk_debug_register(struct clk *clk) { return 0; }
 #endif
 
+/* caller must hold prepare_lock */
+static void clk_unprepare_unused_subtree(struct clk *clk)
+{
+       struct clk *child;
+
+       if (!clk)
+               return;
+
+       hlist_for_each_entry(child, &clk->children, child_node)
+               clk_unprepare_unused_subtree(child);
+
+       if (clk->prepare_count)
+               return;
+
+       if (clk->flags & CLK_IGNORE_UNUSED)
+               return;
+
+       if (__clk_is_prepared(clk))
+               if (clk->ops->unprepare)
+                       clk->ops->unprepare(clk->hw);
+}
+
 /* caller must hold prepare_lock */
 static void clk_disable_unused_subtree(struct clk *clk)
 {
@@ -386,6 +408,12 @@ static int clk_disable_unused(void)
        hlist_for_each_entry(clk, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(clk);
 
+       hlist_for_each_entry(clk, &clk_root_list, child_node)
+               clk_unprepare_unused_subtree(clk);
+
+       hlist_for_each_entry(clk, &clk_orphan_list, child_node)
+               clk_unprepare_unused_subtree(clk);
+
        mutex_unlock(&prepare_lock);
 
        return 0;
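
As a usage note, a clock that must never be unprepared or disabled by
these late-init passes can opt out with CLK_IGNORE_UNUSED, which both
subtree walks above check. A hedged sketch, using a hypothetical
always-on gate (names and register offset made up for illustration):

	#include <linux/clk-provider.h>
	#include <linux/spinlock.h>

	#define FOO_GATE_REG	0x10	/* hypothetical gate register offset */

	static DEFINE_SPINLOCK(foo_gate_lock);

	static struct clk *foo_register_always_on(void __iomem *base)
	{
		/* CLK_IGNORE_UNUSED makes both late-init passes skip this clock. */
		return clk_register_gate(NULL, "foo_always_on", "foo_parent",
					 CLK_IGNORE_UNUSED, base + FOO_GATE_REG,
					 0, 0, &foo_gate_lock);
	}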