| /* |
| * Clock and PLL control for DaVinci devices |
| * |
| * Copyright (C) 2006-2007 Texas Instruments. |
| * Copyright (C) 2008-2009 Deep Root Systems, LLC |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License as published by |
| * the Free Software Foundation; either version 2 of the License, or |
| * (at your option) any later version. |
| */ |
| |
| #include <linux/module.h> |
| #include <linux/kernel.h> |
| #include <linux/init.h> |
| #include <linux/errno.h> |
| #include <linux/clk.h> |
| #include <linux/err.h> |
| #include <linux/mutex.h> |
| #include <linux/io.h> |
| #include <linux/delay.h> |
| |
| #include <mach/hardware.h> |
| |
| #include <mach/psc.h> |
| #include <mach/cputype.h> |
| #include "clock.h" |
| |
| static LIST_HEAD(clocks); |
| static DEFINE_MUTEX(clocks_mutex); |
| static DEFINE_SPINLOCK(clockfw_lock); |
| |
| static unsigned psc_domain(struct clk *clk) |
| { |
| return (clk->flags & PSC_DSP) |
| ? DAVINCI_GPSC_DSPDOMAIN |
| : DAVINCI_GPSC_ARMDOMAIN; |
| } |
| |
| static void __clk_enable(struct clk *clk) |
| { |
| if (clk->parent) |
| __clk_enable(clk->parent); |
| if (clk->usecount++ == 0 && (clk->flags & CLK_PSC)) |
| davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 1); |
| } |
| |
| static void __clk_disable(struct clk *clk) |
| { |
| if (WARN_ON(clk->usecount == 0)) |
| return; |
| if (--clk->usecount == 0 && !(clk->flags & CLK_PLL)) |
| davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 0); |
| if (clk->parent) |
| __clk_disable(clk->parent); |
| } |
| |
| int clk_enable(struct clk *clk) |
| { |
| unsigned long flags; |
| |
| if (clk == NULL || IS_ERR(clk)) |
| return -EINVAL; |
| |
| spin_lock_irqsave(&clockfw_lock, flags); |
| __clk_enable(clk); |
| spin_unlock_irqrestore(&clockfw_lock, flags); |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(clk_enable); |
| |
| void clk_disable(struct clk *clk) |
| { |
| unsigned long flags; |
| |
| if (clk == NULL || IS_ERR(clk)) |
| return; |
| |
| spin_lock_irqsave(&clockfw_lock, flags); |
| __clk_disable(clk); |
| spin_unlock_irqrestore(&clockfw_lock, flags); |
| } |
| EXPORT_SYMBOL(clk_disable); |
| |
| unsigned long clk_get_rate(struct clk *clk) |
| { |
| if (clk == NULL || IS_ERR(clk)) |
| return -EINVAL; |
| |
| return clk->rate; |
| } |
| EXPORT_SYMBOL(clk_get_rate); |
| |
| long clk_round_rate(struct clk *clk, unsigned long rate) |
| { |
| if (clk == NULL || IS_ERR(clk)) |
| return -EINVAL; |
| |
| if (clk->round_rate) |
| return clk->round_rate(clk, rate); |
| |
| return clk->rate; |
| } |
| EXPORT_SYMBOL(clk_round_rate); |
| |
| /* Propagate rate to children */ |
| static void propagate_rate(struct clk *root) |
| { |
| struct clk *clk; |
| |
| list_for_each_entry(clk, &root->children, childnode) { |
| if (clk->recalc) |
| clk->rate = clk->recalc(clk); |
| propagate_rate(clk); |
| } |
| } |
| |
| int clk_set_rate(struct clk *clk, unsigned long rate) |
| { |
| unsigned long flags; |
| int ret = -EINVAL; |
| |
| if (clk == NULL || IS_ERR(clk)) |
| return ret; |
| |
| spin_lock_irqsave(&clockfw_lock, flags); |
| if (clk->set_rate) |
| ret = clk->set_rate(clk, rate); |
| if (ret == 0) { |
| if (clk->recalc) |
| clk->rate = clk->recalc(clk); |
| propagate_rate(clk); |
| } |
| spin_unlock_irqrestore(&clockfw_lock, flags); |
| |
| return ret; |
| } |
| EXPORT_SYMBOL(clk_set_rate); |
| |
| int clk_set_parent(struct clk *clk, struct clk *parent) |
| { |
| unsigned long flags; |
| |
| if (clk == NULL || IS_ERR(clk)) |
| return -EINVAL; |
| |
| /* Cannot change parent on enabled clock */ |
| if (WARN_ON(clk->usecount)) |
| return -EINVAL; |
| |
| mutex_lock(&clocks_mutex); |
| clk->parent = parent; |
| list_del_init(&clk->childnode); |
| list_add(&clk->childnode, &clk->parent->children); |
| mutex_unlock(&clocks_mutex); |
| |
| spin_lock_irqsave(&clockfw_lock, flags); |
| if (clk->recalc) |
| clk->rate = clk->recalc(clk); |
| propagate_rate(clk); |
| spin_unlock_irqrestore(&clockfw_lock, flags); |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(clk_set_parent); |
| |
| int clk_register(struct clk *clk) |
| { |
| if (clk == NULL || IS_ERR(clk)) |
| return -EINVAL; |
| |
| if (WARN(clk->parent && !clk->parent->rate, |
| "CLK: %s parent %s has no rate!\n", |
| clk->name, clk->parent->name)) |
| return -EINVAL; |
| |
| INIT_LIST_HEAD(&clk->children); |
| |
| mutex_lock(&clocks_mutex); |
| list_add_tail(&clk->node, &clocks); |
| if (clk->parent) |
| list_add_tail(&clk->childnode, &clk->parent->children); |
| mutex_unlock(&clocks_mutex); |
| |
| /* If rate is already set, use it */ |
| if (clk->rate) |
| return 0; |
| |
| /* Else, see if there is a way to calculate it */ |
| if (clk->recalc) |
| clk->rate = clk->recalc(clk); |
| |
| /* Otherwise, default to parent rate */ |
| else if (clk->parent) |
| clk->rate = clk->parent->rate; |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(clk_register); |
| |
| void clk_unregister(struct clk *clk) |
| { |
| if (clk == NULL || IS_ERR(clk)) |
| return; |
| |
| mutex_lock(&clocks_mutex); |
| list_del(&clk->node); |
| list_del(&clk->childnode); |
| mutex_unlock(&clocks_mutex); |
| } |
| EXPORT_SYMBOL(clk_unregister); |
| |
| #ifdef CONFIG_DAVINCI_RESET_CLOCKS |
| /* |
| * Disable any unused clocks left on by the bootloader |
| */ |
| static int __init clk_disable_unused(void) |
| { |
| struct clk *ck; |
| |
| spin_lock_irq(&clockfw_lock); |
| list_for_each_entry(ck, &clocks, node) { |
| if (ck->usecount > 0) |
| continue; |
| if (!(ck->flags & CLK_PSC)) |
| continue; |
| |
| /* ignore if in Disabled or SwRstDisable states */ |
| if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc)) |
| continue; |
| |
| pr_info("Clocks: disable unused %s\n", ck->name); |
| davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc, 0); |
| } |
| spin_unlock_irq(&clockfw_lock); |
| |
| return 0; |
| } |
| late_initcall(clk_disable_unused); |
| #endif |
| |
| static unsigned long clk_sysclk_recalc(struct clk *clk) |
| { |
| u32 v, plldiv; |
| struct pll_data *pll; |
| unsigned long rate = clk->rate; |
| |
| /* If this is the PLL base clock, no more calculations needed */ |
| if (clk->pll_data) |
| return rate; |
| |
| if (WARN_ON(!clk->parent)) |
| return rate; |
| |
| rate = clk->parent->rate; |
| |
| /* Otherwise, the parent must be a PLL */ |
| if (WARN_ON(!clk->parent->pll_data)) |
| return rate; |
| |
| pll = clk->parent->pll_data; |
| |
| /* If pre-PLL, source clock is before the multiplier and divider(s) */ |
| if (clk->flags & PRE_PLL) |
| rate = pll->input_rate; |
| |
| if (!clk->div_reg) |
| return rate; |
| |
| v = __raw_readl(pll->base + clk->div_reg); |
| if (v & PLLDIV_EN) { |
| plldiv = (v & PLLDIV_RATIO_MASK) + 1; |
| if (plldiv) |
| rate /= plldiv; |
| } |
| |
| return rate; |
| } |
| |
| static unsigned long clk_leafclk_recalc(struct clk *clk) |
| { |
| if (WARN_ON(!clk->parent)) |
| return clk->rate; |
| |
| return clk->parent->rate; |
| } |
| |
/*
 * Recompute a PLL master clock's output rate from the PLL controller
 * registers.  PLLCTL decides bypass vs. locked mode; in locked mode the
 * reference rate is divided by the pre-divider, multiplied by PLLM, and
 * divided by the post-divider as the hardware reports them.
 */
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	/* Map the controller and latch the reference (input) rate. */
	pll->base = IO_ADDRESS(pll->phys_base);
	ctrl = __raw_readl(pll->base + PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN) {
		bypass = 0;
		mult = __raw_readl(pll->base + PLLM);
		/* DM365 silicon doubles the programmed multiplier. */
		if (cpu_is_davinci_dm365())
			mult = 2 * (mult & PLLM_PLLM_MASK);
		else
			mult = (mult & PLLM_PLLM_MASK) + 1;
	} else
		bypass = 1;

	/* Divider fields hold divider-minus-one, qualified by an enable bit. */
	if (pll->flags & PLL_HAS_PREDIV) {
		prediv = __raw_readl(pll->base + PREDIV);
		if (prediv & PLLDIV_EN)
			prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
		else
			prediv = 1;
	}

	/* pre-divider is fixed, but (some?) chips won't report that */
	if (cpu_is_davinci_dm355() && pll->num == 1)
		prediv = 8;

	if (pll->flags & PLL_HAS_POSTDIV) {
		postdiv = __raw_readl(pll->base + POSTDIV);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
		else
			postdiv = 1;
	}

	/* In bypass mode the reference clock passes straight through. */
	if (!bypass) {
		rate /= prediv;
		rate *= mult;
		rate /= postdiv;
	}

	pr_debug("PLL%d: input = %lu MHz [ ",
		 pll->num, clk->parent->rate / 1000000);
	if (bypass)
		pr_debug("bypass ");
	if (prediv > 1)
		pr_debug("/ %d ", prediv);
	if (mult > 1)
		pr_debug("* %d ", mult);
	if (postdiv > 1)
		pr_debug("/ %d ", postdiv);
	pr_debug("] --> %lu MHz output.\n", rate / 1000000);

	return rate;
}
| |
/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 *
 * Returns 0 on success, -EINVAL if the PLL registers are not mapped yet.
 * The sequence (bypass, reset, program dividers, release reset, wait
 * for lock, re-enable) follows the OMAP-L138 PLLC programming model.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
			unsigned int mult, unsigned int postdiv)
{
	u32 ctrl;
	unsigned int locktime;

	if (pll->base == NULL)
		return -EINVAL;

	/*
	 * PLL lock time required per OMAP-L138 datasheet is
	 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
	 * as 4 and OSCIN cycle as 25 MHz.
	 */
	if (prediv) {
		locktime = ((2000 * prediv) / 100);
		/* hardware wants divider-minus-one plus its enable bit */
		prediv = (prediv - 1) | PLLDIV_EN;
	} else {
		locktime = 20;
	}
	if (postdiv)
		postdiv = (postdiv - 1) | PLLDIV_EN;
	if (mult)
		mult = mult - 1;

	ctrl = __raw_readl(pll->base + PLLCTL);

	/* Switch the PLL to bypass mode */
	ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
	__raw_writel(ctrl, pll->base + PLLCTL);

	/*
	 * Wait for 4 OSCIN/CLKIN cycles to ensure that the PLLC has switched
	 * to bypass mode. Delay of 1us ensures we are good for all > 4MHz
	 * OSCIN/CLKIN inputs. Typically the input is ~25MHz.
	 */
	udelay(1);

	/* Reset and enable PLL */
	ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
	__raw_writel(ctrl, pll->base + PLLCTL);

	if (pll->flags & PLL_HAS_PREDIV)
		__raw_writel(prediv, pll->base + PREDIV);

	__raw_writel(mult, pll->base + PLLM);

	if (pll->flags & PLL_HAS_POSTDIV)
		__raw_writel(postdiv, pll->base + POSTDIV);

	/*
	 * Wait for PLL to reset properly, OMAP-L138 datasheet says
	 * 'min' time = 125ns
	 */
	udelay(1);

	/* Bring PLL out of reset */
	ctrl |= PLLCTL_PLLRST;
	__raw_writel(ctrl, pll->base + PLLCTL);

	/* Allow the PLL to lock before leaving bypass */
	udelay(locktime);

	/* Remove PLL from bypass mode */
	ctrl |= PLLCTL_PLLEN;
	__raw_writel(ctrl, pll->base + PLLCTL);

	return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
| |
| int __init davinci_clk_init(struct davinci_clk *clocks) |
| { |
| struct davinci_clk *c; |
| struct clk *clk; |
| |
| for (c = clocks; c->lk.clk; c++) { |
| clk = c->lk.clk; |
| |
| if (!clk->recalc) { |
| |
| /* Check if clock is a PLL */ |
| if (clk->pll_data) |
| clk->recalc = clk_pllclk_recalc; |
| |
| /* Else, if it is a PLL-derived clock */ |
| else if (clk->flags & CLK_PLL) |
| clk->recalc = clk_sysclk_recalc; |
| |
| /* Otherwise, it is a leaf clock (PSC clock) */ |
| else if (clk->parent) |
| clk->recalc = clk_leafclk_recalc; |
| } |
| |
| if (clk->recalc) |
| clk->rate = clk->recalc(clk); |
| |
| if (clk->lpsc) |
| clk->flags |= CLK_PSC; |
| |
| clkdev_add(&c->lk); |
| clk_register(clk); |
| |
| /* Turn on clocks that Linux doesn't otherwise manage */ |
| if (clk->flags & ALWAYS_ENABLED) |
| clk_enable(clk); |
| } |
| |
| return 0; |
| } |
| |
| #ifdef CONFIG_PROC_FS |
| #include <linux/proc_fs.h> |
| #include <linux/seq_file.h> |
| |
| static void *davinci_ck_start(struct seq_file *m, loff_t *pos) |
| { |
| return *pos < 1 ? (void *)1 : NULL; |
| } |
| |
| static void *davinci_ck_next(struct seq_file *m, void *v, loff_t *pos) |
| { |
| ++*pos; |
| return NULL; |
| } |
| |
/* seq_file stop: nothing to release. */
static void davinci_ck_stop(struct seq_file *m, void *v)
{
}
| |
| #define CLKNAME_MAX 10 /* longest clock name */ |
| #define NEST_DELTA 2 |
| #define NEST_MAX 4 |
| |
/*
 * Print one clock line (indented by @nest spaces into a fixed-width
 * name column) followed, recursively, by all of its children.
 */
static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
	char *state;
	char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
	struct clk *clk;
	unsigned i;

	if (parent->flags & CLK_PLL)
		state = "pll";
	else if (parent->flags & CLK_PSC)
		state = "psc";
	else
		state = "";

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	/* copy is clamped so an over-long name cannot overrun buf */
	memcpy(buf + nest, parent->name,
	       min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
		   buf, parent->usecount, state, clk_get_rate(parent));
	/* REVISIT show device associations too */

	/* cost is now small, but not linear... */
	list_for_each_entry(clk, &parent->children, childnode) {
		dump_clock(s, nest + NEST_DELTA, clk);
	}
}
| |
| static int davinci_ck_show(struct seq_file *m, void *v) |
| { |
| /* Show clock tree; we know the main oscillator is first. |
| * We trust nonzero usecounts equate to PSC enables... |
| */ |
| mutex_lock(&clocks_mutex); |
| if (!list_empty(&clocks)) |
| dump_clock(m, 0, list_first_entry(&clocks, struct clk, node)); |
| mutex_unlock(&clocks_mutex); |
| |
| return 0; |
| } |
| |
/* seq_file iterator: a single "record" dumps the entire clock tree. */
static const struct seq_operations davinci_ck_op = {
	.start	= davinci_ck_start,
	.next	= davinci_ck_next,
	.stop	= davinci_ck_stop,
	.show	= davinci_ck_show
};
| |
/* open() for /proc/davinci_clocks: bind the seq_file iterator. */
static int davinci_ck_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &davinci_ck_op);
}
| |
/* Plain seq_file-backed read-only proc interface. */
static const struct file_operations proc_davinci_ck_operations = {
	.open		= davinci_ck_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
| |
| static int __init davinci_ck_proc_init(void) |
| { |
| proc_create("davinci_clocks", 0, NULL, &proc_davinci_ck_operations); |
| return 0; |
| |
| } |
| __initcall(davinci_ck_proc_init); |
#endif /* CONFIG_PROC_FS */