@@ -844,3 +844,136 @@ void clk_unregister(struct clk *clk)
 		return;
 
 	mutex_lock(&clocks_mutex);
+	list_del(&clk->sibling);
+	list_del(&clk->node);
+	mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clk_unregister);
+
+void clk_enable_init_clocks(void)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &clocks, node)
+		if (clkp->flags & ENABLE_ON_INIT)
+			clk_enable(clkp);
+}
+
+/**
+ * omap_clk_get_by_name - locate OMAP struct clk by its name
+ * @name: name of the struct clk to locate
+ *
+ * Locate an OMAP struct clk by its name.  Assumes that struct clk
+ * names are unique.  Returns NULL if not found or a pointer to the
+ * struct clk if found.
+ */
+struct clk *omap_clk_get_by_name(const char *name)
+{
+	struct clk *c;
+	struct clk *ret = NULL;
+
+	mutex_lock(&clocks_mutex);
+
+	list_for_each_entry(c, &clocks, node) {
+		if (!strcmp(c->name, name)) {
+			ret = c;
+			break;
+		}
+	}
+
+	mutex_unlock(&clocks_mutex);
+
+	return ret;
+}
+
+int omap_clk_enable_autoidle_all(void)
+{
+	struct clk *c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+
+	list_for_each_entry(c, &clocks, node)
+		if (c->ops->allow_idle)
+			c->ops->allow_idle(c);
+
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return 0;
+}
+
+int omap_clk_disable_autoidle_all(void)
+{
+	struct clk *c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+
+	list_for_each_entry(c, &clocks, node)
+		if (c->ops->deny_idle)
+			c->ops->deny_idle(c);
+
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Low level helpers
+ */
+static int clkll_enable_null(struct clk *clk)
+{
+	return 0;
+}
+
+static void clkll_disable_null(struct clk *clk)
+{
+}
+
+const struct clkops clkops_null = {
+	.enable		= clkll_enable_null,
+	.disable	= clkll_disable_null,
+};
+
+/*
+ * Dummy clock
+ *
+ * Used for clock aliases that are needed on some OMAPs, but not others
+ */
+struct clk dummy_ck = {
+	.name	= "dummy",
+	.ops	= &clkops_null,
+};
+
+/*
+ *
+ */
+
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+/*
+ * Disable any unused clocks left on by the bootloader
+ */
+static int __init clk_disable_unused(void)
+{
+	struct clk *ck;
+	unsigned long flags;
+
+	pr_info("clock: disabling unused clocks to save power\n");
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	list_for_each_entry(ck, &clocks, node) {
+		if (ck->ops == &clkops_null)
+			continue;
+
+		if (ck->usecount > 0 || !ck->enable_reg)
+			continue;
+
+		omap1_clk_disable_unused(ck);
+	}
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return 0;
+}
+late_initcall(clk_disable_unused);
+late_initcall(omap_clk_enable_autoidle_all);
+#endif