Thomas Gleixner | d2912cb | 2019-06-04 10:11:33 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 2 | /* |
| 3 | * OMAP3/4 - specific DPLL control functions |
| 4 | * |
Richard Woodruff | 358965d | 2010-02-22 22:09:08 -0700 | [diff] [blame] | 5 | * Copyright (C) 2009-2010 Texas Instruments, Inc. |
| 6 | * Copyright (C) 2009-2010 Nokia Corporation |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 7 | * |
| 8 | * Written by Paul Walmsley |
Richard Woodruff | 358965d | 2010-02-22 22:09:08 -0700 | [diff] [blame] | 9 | * Testing and integration fixes by Jouni Högander |
| 10 | * |
| 11 | * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth |
| 12 | * Menon |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 13 | * |
| 14 | * Parts of this code are based on code written by |
| 15 | * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 16 | */ |
| 17 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 18 | #include <linux/kernel.h> |
| 19 | #include <linux/device.h> |
| 20 | #include <linux/list.h> |
| 21 | #include <linux/errno.h> |
| 22 | #include <linux/delay.h> |
| 23 | #include <linux/clk.h> |
| 24 | #include <linux/io.h> |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 25 | #include <linux/bitops.h> |
Jean-Christop PLAGNIOL-VILLARD | 6d803ba | 2010-11-17 10:04:33 +0100 | [diff] [blame] | 26 | #include <linux/clkdev.h> |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 27 | #include <linux/clk/ti.h> |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 28 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 29 | #include "clock.h" |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 30 | |
| 31 | /* CM_AUTOIDLE_PLL*.AUTO_* bit values */ |
| 32 | #define DPLL_AUTOIDLE_DISABLE 0x0 |
| 33 | #define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1 |
| 34 | |
| 35 | #define MAX_DPLL_WAIT_TRIES 1000000 |
| 36 | |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 37 | #define OMAP3XXX_EN_DPLL_LOCKED 0x7 |
| 38 | |
| 39 | /* Forward declarations */ |
| 40 | static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk); |
| 41 | static void omap3_dpll_deny_idle(struct clk_hw_omap *clk); |
| 42 | static void omap3_dpll_allow_idle(struct clk_hw_omap *clk); |
| 43 | |
Paul Walmsley | 60c3f65 | 2010-01-26 20:13:11 -0700 | [diff] [blame] | 44 | /* Private functions */ |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 45 | |
| 46 | /* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 47 | static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 48 | { |
| 49 | const struct dpll_data *dd; |
| 50 | u32 v; |
| 51 | |
| 52 | dd = clk->dpll_data; |
| 53 | |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 54 | v = ti_clk_ll_ops->clk_readl(&dd->control_reg); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 55 | v &= ~dd->enable_mask; |
| 56 | v |= clken_bits << __ffs(dd->enable_mask); |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 57 | ti_clk_ll_ops->clk_writel(v, &dd->control_reg); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 58 | } |
| 59 | |
| 60 | /* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 61 | static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 62 | { |
| 63 | const struct dpll_data *dd; |
| 64 | int i = 0; |
| 65 | int ret = -EINVAL; |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 66 | const char *clk_name; |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 67 | |
| 68 | dd = clk->dpll_data; |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 69 | clk_name = clk_hw_get_name(&clk->hw); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 70 | |
| 71 | state <<= __ffs(dd->idlest_mask); |
| 72 | |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 73 | while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) |
Tero Kristo | 519ab8b | 2013-10-22 11:49:58 +0300 | [diff] [blame] | 74 | != state) && i < MAX_DPLL_WAIT_TRIES) { |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 75 | i++; |
| 76 | udelay(1); |
| 77 | } |
| 78 | |
| 79 | if (i == MAX_DPLL_WAIT_TRIES) { |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 80 | pr_err("clock: %s failed transition to '%s'\n", |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 81 | clk_name, (state) ? "locked" : "bypassed"); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 82 | } else { |
| 83 | pr_debug("clock: %s transition to '%s' in %d loops\n", |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 84 | clk_name, (state) ? "locked" : "bypassed", i); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 85 | |
| 86 | ret = 0; |
| 87 | } |
| 88 | |
| 89 | return ret; |
| 90 | } |
| 91 | |
| 92 | /* From 3430 TRM ES2 4.7.6.2 */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 93 | static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 94 | { |
| 95 | unsigned long fint; |
| 96 | u16 f = 0; |
| 97 | |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 98 | fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n; |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 99 | |
| 100 | pr_debug("clock: fint is %lu\n", fint); |
| 101 | |
| 102 | if (fint >= 750000 && fint <= 1000000) |
| 103 | f = 0x3; |
| 104 | else if (fint > 1000000 && fint <= 1250000) |
| 105 | f = 0x4; |
| 106 | else if (fint > 1250000 && fint <= 1500000) |
| 107 | f = 0x5; |
| 108 | else if (fint > 1500000 && fint <= 1750000) |
| 109 | f = 0x6; |
| 110 | else if (fint > 1750000 && fint <= 2100000) |
| 111 | f = 0x7; |
| 112 | else if (fint > 7500000 && fint <= 10000000) |
| 113 | f = 0xB; |
| 114 | else if (fint > 10000000 && fint <= 12500000) |
| 115 | f = 0xC; |
| 116 | else if (fint > 12500000 && fint <= 15000000) |
| 117 | f = 0xD; |
| 118 | else if (fint > 15000000 && fint <= 17500000) |
| 119 | f = 0xE; |
| 120 | else if (fint > 17500000 && fint <= 21000000) |
| 121 | f = 0xF; |
| 122 | else |
| 123 | pr_debug("clock: unknown freqsel setting for %d\n", n); |
| 124 | |
| 125 | return f; |
| 126 | } |
| 127 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 128 | /* |
| 129 | * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness |
| 130 | * @clk: pointer to a DPLL struct clk |
| 131 | * |
| 132 | * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report |
| 133 | * readiness before returning. Will save and restore the DPLL's |
| 134 | * autoidle state across the enable, per the CDP code. If the DPLL |
| 135 | * locked successfully, return 0; if the DPLL did not lock in the time |
| 136 | * allotted, or DPLL3 was passed in, return -EINVAL. |
| 137 | */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 138 | static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 139 | { |
Vikram Pandita | 55ffe16 | 2012-07-04 05:00:44 -0600 | [diff] [blame] | 140 | const struct dpll_data *dd; |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 141 | u8 ai; |
Vikram Pandita | 55ffe16 | 2012-07-04 05:00:44 -0600 | [diff] [blame] | 142 | u8 state = 1; |
| 143 | int r = 0; |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 144 | |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 145 | pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw)); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 146 | |
Vikram Pandita | 55ffe16 | 2012-07-04 05:00:44 -0600 | [diff] [blame] | 147 | dd = clk->dpll_data; |
| 148 | state <<= __ffs(dd->idlest_mask); |
| 149 | |
| 150 | /* Check if already locked */ |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 151 | if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) == |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 152 | state) |
Vikram Pandita | 55ffe16 | 2012-07-04 05:00:44 -0600 | [diff] [blame] | 153 | goto done; |
| 154 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 155 | ai = omap3_dpll_autoidle_read(clk); |
| 156 | |
Vaibhav Bedia | d76316f | 2012-05-07 23:55:30 -0600 | [diff] [blame] | 157 | if (ai) |
| 158 | omap3_dpll_deny_idle(clk); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 159 | |
| 160 | _omap3_dpll_write_clken(clk, DPLL_LOCKED); |
| 161 | |
| 162 | r = _omap3_wait_dpll_status(clk, 1); |
| 163 | |
| 164 | if (ai) |
| 165 | omap3_dpll_allow_idle(clk); |
| 166 | |
Vikram Pandita | 55ffe16 | 2012-07-04 05:00:44 -0600 | [diff] [blame] | 167 | done: |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 168 | return r; |
| 169 | } |
| 170 | |
| 171 | /* |
| 172 | * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness |
| 173 | * @clk: pointer to a DPLL struct clk |
| 174 | * |
| 175 | * Instructs a non-CORE DPLL to enter low-power bypass mode. In |
| 176 | * bypass mode, the DPLL's rate is set equal to its parent clock's |
| 177 | * rate. Waits for the DPLL to report readiness before returning. |
| 178 | * Will save and restore the DPLL's autoidle state across the enable, |
| 179 | * per the CDP code. If the DPLL entered bypass mode successfully, |
| 180 | * return 0; if the DPLL did not enter bypass in the time allotted, or |
| 181 | * DPLL3 was passed in, or the DPLL does not support low-power bypass, |
| 182 | * return -EINVAL. |
| 183 | */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 184 | static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 185 | { |
| 186 | int r; |
| 187 | u8 ai; |
| 188 | |
| 189 | if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) |
| 190 | return -EINVAL; |
| 191 | |
| 192 | pr_debug("clock: configuring DPLL %s for low-power bypass\n", |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 193 | clk_hw_get_name(&clk->hw)); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 194 | |
| 195 | ai = omap3_dpll_autoidle_read(clk); |
| 196 | |
| 197 | _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS); |
| 198 | |
| 199 | r = _omap3_wait_dpll_status(clk, 0); |
| 200 | |
| 201 | if (ai) |
| 202 | omap3_dpll_allow_idle(clk); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 203 | |
| 204 | return r; |
| 205 | } |
| 206 | |
| 207 | /* |
| 208 | * _omap3_noncore_dpll_stop - instruct a DPLL to stop |
| 209 | * @clk: pointer to a DPLL struct clk |
| 210 | * |
| 211 | * Instructs a non-CORE DPLL to enter low-power stop. Will save and |
| 212 | * restore the DPLL's autoidle state across the stop, per the CDP |
| 213 | * code. If DPLL3 was passed in, or the DPLL does not support |
| 214 | * low-power stop, return -EINVAL; otherwise, return 0. |
| 215 | */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 216 | static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 217 | { |
| 218 | u8 ai; |
| 219 | |
| 220 | if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP))) |
| 221 | return -EINVAL; |
| 222 | |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 223 | pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw)); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 224 | |
| 225 | ai = omap3_dpll_autoidle_read(clk); |
| 226 | |
| 227 | _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP); |
| 228 | |
| 229 | if (ai) |
| 230 | omap3_dpll_allow_idle(clk); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 231 | |
| 232 | return 0; |
| 233 | } |
| 234 | |
Richard Woodruff | 358965d | 2010-02-22 22:09:08 -0700 | [diff] [blame] | 235 | /** |
Jon Hunter | a36795c | 2010-12-21 21:31:43 -0700 | [diff] [blame] | 236 | * _lookup_dco - Lookup DCO used by j-type DPLL |
Richard Woodruff | 358965d | 2010-02-22 22:09:08 -0700 | [diff] [blame] | 237 | * @clk: pointer to a DPLL struct clk |
| 238 | * @dco: digital control oscillator selector |
Jon Hunter | a36795c | 2010-12-21 21:31:43 -0700 | [diff] [blame] | 239 | * @m: DPLL multiplier to set |
| 240 | * @n: DPLL divider to set |
| 241 | * |
| 242 | * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" |
| 243 | * |
| 244 | * XXX This code is not needed for 3430/AM35xx; can it be optimized |
| 245 | * out in non-multi-OMAP builds for those chips? |
| 246 | */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 247 | static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n) |
Jon Hunter | a36795c | 2010-12-21 21:31:43 -0700 | [diff] [blame] | 248 | { |
| 249 | unsigned long fint, clkinp; /* watch out for overflow */ |
| 250 | |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 251 | clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)); |
Jon Hunter | a36795c | 2010-12-21 21:31:43 -0700 | [diff] [blame] | 252 | fint = (clkinp / n) * m; |
| 253 | |
| 254 | if (fint < 1000000000) |
| 255 | *dco = 2; |
| 256 | else |
| 257 | *dco = 4; |
| 258 | } |
| 259 | |
/**
 * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @sd_div: target sigma-delta divider
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
	unsigned long clkinp, sd; /* watch out for overflow */
	int mod1, mod2;

	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));

	/*
	 * target sigma-delta to near 250MHz
	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
	 */
	clkinp /= 100000;		/* shift from MHz to 10*Hz for 38.4 and 19.2 */
	/* mod1/mod2 capture the two truncated remainders of the division */
	mod1 = (clkinp * m) % (250 * n);
	sd = (clkinp * m) / (250 * n);
	mod2 = sd % 10;
	sd /= 10;

	/* Any nonzero remainder means the ceiling must round up by one */
	if (mod1 || mod2)
		sd++;
	*sd_div = sd;
}
| 293 | |
/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk: struct clk * of DPLL to set
 * @freqsel: FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock.  Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div, ai = 0;
	u32 v;
	bool errata_i810;

	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
	/* The DPLL must be in bypass before M/N may be reprogrammed */
	_omap3_noncore_dpll_bypass(clk);

	/*
	 * Set jitter correction. Jitter correction applicable for OMAP343X
	 * only since freqsel field is no longer present on other devices.
	 */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	/* Handle Duty Cycle Correction */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask; /* Enable DCC */
		else
			v &= ~dd->dcc_mask; /* Disable DCC */
	}

	/* M/N come from the most recent round_rate() via dd->last_rounded_* */
	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	/* Hardware encodes the divider as N-1 */
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for dplls that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	/*
	 * Errata i810 - DPLL controller can get stuck while transitioning
	 * to a power saving state. Software must ensure the DPLL can not
	 * transition to a low power state while changing M/N values.
	 * Easiest way to accomplish this is to prevent DPLL autoidle
	 * before doing the M/N re-program.
	 */
	errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;

	if (errata_i810) {
		ai = omap3_dpll_autoidle_read(clk);
		if (ai) {
			omap3_dpll_deny_idle(clk);

			/* OCP barrier */
			/* Readback flushes the posted write above */
			omap3_dpll_autoidle_read(clk);
		}
	}

	/* Commit the new M/N (and DCO/SD) values only after autoidle is off */
	ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* We let the clock framework set the other output dividers later */

	/* REVISIT: Set ramp-up delay? */

	_omap3_noncore_dpll_lock(clk);

	/* Re-enable autoidle only if it was on before the i810 workaround */
	if (errata_i810 && ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}
| 404 | |
| 405 | /* Public functions */ |
| 406 | |
/**
 * omap3_dpll_recalc - recalculate DPLL rate
 * @hw: struct clk_hw of the DPLL
 * @parent_rate: parent clock rate (unused; rate is read from hardware)
 *
 * Recalculate and propagate the DPLL rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
	/* Delegate to the common OMAP2+ M/N rate readback */
	return omap2_get_dpll_rate(to_clk_hw_omap(hw));
}
| 419 | |
| 420 | /* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */ |
| 421 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 422 | /** |
| 423 | * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode |
| 424 | * @clk: pointer to a DPLL struct clk |
| 425 | * |
| 426 | * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock. |
| 427 | * The choice of modes depends on the DPLL's programmed rate: if it is |
| 428 | * the same as the DPLL's parent clock, it will enter bypass; |
| 429 | * otherwise, it will enter lock. This code will wait for the DPLL to |
| 430 | * indicate readiness before returning, unless the DPLL takes too long |
| 431 | * to enter the target state. Intended to be used as the struct clk's |
| 432 | * enable function. If DPLL3 was passed in, or the DPLL does not |
| 433 | * support low-power stop, or if the DPLL took too long to enter |
| 434 | * bypass or lock, return -EINVAL; otherwise, return 0. |
| 435 | */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 436 | int omap3_noncore_dpll_enable(struct clk_hw *hw) |
| 437 | { |
| 438 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 439 | int r; |
| 440 | struct dpll_data *dd; |
Tomeu Vizoso | 035a61c | 2015-01-23 12:03:30 +0100 | [diff] [blame] | 441 | struct clk_hw *parent; |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 442 | |
| 443 | dd = clk->dpll_data; |
| 444 | if (!dd) |
| 445 | return -EINVAL; |
| 446 | |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 447 | if (clk->clkdm) { |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 448 | r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk); |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 449 | if (r) { |
| 450 | WARN(1, |
| 451 | "%s: could not enable %s's clockdomain %s: %d\n", |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 452 | __func__, clk_hw_get_name(hw), |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 453 | clk->clkdm_name, r); |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 454 | return r; |
| 455 | } |
| 456 | } |
| 457 | |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 458 | parent = clk_hw_get_parent(hw); |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 459 | |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 460 | if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) { |
| 461 | WARN_ON(parent != dd->clk_bypass); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 462 | r = _omap3_noncore_dpll_bypass(clk); |
| 463 | } else { |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 464 | WARN_ON(parent != dd->clk_ref); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 465 | r = _omap3_noncore_dpll_lock(clk); |
| 466 | } |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 467 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 468 | return r; |
| 469 | } |
| 470 | |
| 471 | /** |
| 472 | * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop |
| 473 | * @clk: pointer to a DPLL struct clk |
| 474 | * |
| 475 | * Instructs a non-CORE DPLL to enter low-power stop. This function is |
| 476 | * intended for use in struct clkops. No return value. |
| 477 | */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 478 | void omap3_noncore_dpll_disable(struct clk_hw *hw) |
| 479 | { |
| 480 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 481 | |
| 482 | _omap3_noncore_dpll_stop(clk); |
| 483 | if (clk->clkdm) |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 484 | ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 485 | } |
| 486 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 487 | /* Non-CORE DPLL rate set code */ |
| 488 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 489 | /** |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 490 | * omap3_noncore_dpll_determine_rate - determine rate for a DPLL |
| 491 | * @hw: pointer to the clock to determine rate for |
Boris Brezillon | 0817b62 | 2015-07-07 20:48:08 +0200 | [diff] [blame] | 492 | * @req: target rate request |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 493 | * |
| 494 | * Determines which DPLL mode to use for reaching a desired target rate. |
| 495 | * Checks whether the DPLL shall be in bypass or locked mode, and if |
| 496 | * locked, calculates the M,N values for the DPLL via round-rate. |
Boris Brezillon | 0817b62 | 2015-07-07 20:48:08 +0200 | [diff] [blame] | 497 | * Returns a 0 on success, negative error value in failure. |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 498 | */ |
Boris Brezillon | 0817b62 | 2015-07-07 20:48:08 +0200 | [diff] [blame] | 499 | int omap3_noncore_dpll_determine_rate(struct clk_hw *hw, |
| 500 | struct clk_rate_request *req) |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 501 | { |
| 502 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 503 | struct dpll_data *dd; |
| 504 | |
Boris Brezillon | 0817b62 | 2015-07-07 20:48:08 +0200 | [diff] [blame] | 505 | if (!req->rate) |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 506 | return -EINVAL; |
| 507 | |
| 508 | dd = clk->dpll_data; |
| 509 | if (!dd) |
| 510 | return -EINVAL; |
| 511 | |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 512 | if (clk_hw_get_rate(dd->clk_bypass) == req->rate && |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 513 | (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 514 | req->best_parent_hw = dd->clk_bypass; |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 515 | } else { |
Boris Brezillon | 0817b62 | 2015-07-07 20:48:08 +0200 | [diff] [blame] | 516 | req->rate = omap2_dpll_round_rate(hw, req->rate, |
| 517 | &req->best_parent_rate); |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 518 | req->best_parent_hw = dd->clk_ref; |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 519 | } |
| 520 | |
Boris Brezillon | 0817b62 | 2015-07-07 20:48:08 +0200 | [diff] [blame] | 521 | req->best_parent_rate = req->rate; |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 522 | |
Boris Brezillon | 0817b62 | 2015-07-07 20:48:08 +0200 | [diff] [blame] | 523 | return 0; |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 524 | } |
| 525 | |
| 526 | /** |
| 527 | * omap3_noncore_dpll_set_parent - set parent for a DPLL clock |
| 528 | * @hw: pointer to the clock to set parent for |
| 529 | * @index: parent index to select |
| 530 | * |
| 531 | * Sets parent for a DPLL clock. This sets the DPLL into bypass or |
| 532 | * locked mode. Returns 0 with success, negative error value otherwise. |
| 533 | */ |
| 534 | int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index) |
| 535 | { |
| 536 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 537 | int ret; |
| 538 | |
| 539 | if (!hw) |
| 540 | return -EINVAL; |
| 541 | |
| 542 | if (index) |
| 543 | ret = _omap3_noncore_dpll_bypass(clk); |
| 544 | else |
| 545 | ret = _omap3_noncore_dpll_lock(clk); |
| 546 | |
| 547 | return ret; |
| 548 | } |
| 549 | |
| 550 | /** |
Tero Kristo | 2e1a7b0 | 2014-10-03 16:57:14 +0300 | [diff] [blame] | 551 | * omap3_noncore_dpll_set_rate - set rate for a DPLL clock |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 552 | * @hw: pointer to the clock to set parent for |
| 553 | * @rate: target rate for the clock |
| 554 | * @parent_rate: rate of the parent clock |
| 555 | * |
| 556 | * Sets rate for a DPLL clock. First checks if the clock parent is |
| 557 | * reference clock (in bypass mode, the rate of the clock can't be |
| 558 | * changed) and proceeds with the rate change operation. Returns 0 |
| 559 | * with success, negative error value otherwise. |
| 560 | */ |
Tero Kristo | 2e1a7b0 | 2014-10-03 16:57:14 +0300 | [diff] [blame] | 561 | int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, |
| 562 | unsigned long parent_rate) |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 563 | { |
| 564 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 565 | struct dpll_data *dd; |
| 566 | u16 freqsel = 0; |
| 567 | int ret; |
| 568 | |
| 569 | if (!hw || !rate) |
| 570 | return -EINVAL; |
| 571 | |
| 572 | dd = clk->dpll_data; |
| 573 | if (!dd) |
| 574 | return -EINVAL; |
| 575 | |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 576 | if (clk_hw_get_parent(hw) != dd->clk_ref) |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 577 | return -EINVAL; |
| 578 | |
| 579 | if (dd->last_rounded_rate == 0) |
| 580 | return -EINVAL; |
| 581 | |
| 582 | /* Freqsel is available only on OMAP343X devices */ |
Tero Kristo | f3b19aa | 2015-02-27 17:54:14 +0200 | [diff] [blame] | 583 | if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) { |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 584 | freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); |
| 585 | WARN_ON(!freqsel); |
| 586 | } |
| 587 | |
| 588 | pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__, |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 589 | clk_hw_get_name(hw), rate); |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 590 | |
| 591 | ret = omap3_noncore_dpll_program(clk, freqsel); |
| 592 | |
| 593 | return ret; |
| 594 | } |
| 595 | |
| 596 | /** |
| 597 | * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock |
| 598 | * @hw: pointer to the clock to set rate and parent for |
| 599 | * @rate: target rate for the DPLL |
| 600 | * @parent_rate: clock rate of the DPLL parent |
| 601 | * @index: new parent index for the DPLL, 0 - reference, 1 - bypass |
| 602 | * |
| 603 | * Sets rate and parent for a DPLL clock. If new parent is the bypass |
| 604 | * clock, only selects the parent. Otherwise proceeds with a rate |
| 605 | * change, as this will effectively also change the parent as the |
| 606 | * DPLL is put into locked mode. Returns 0 with success, negative error |
| 607 | * value otherwise. |
| 608 | */ |
| 609 | int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, |
| 610 | unsigned long rate, |
| 611 | unsigned long parent_rate, |
| 612 | u8 index) |
| 613 | { |
| 614 | int ret; |
| 615 | |
| 616 | if (!hw || !rate) |
| 617 | return -EINVAL; |
| 618 | |
| 619 | /* |
| 620 | * clk-ref at index[0], in which case we only need to set rate, |
| 621 | * the parent will be changed automatically with the lock sequence. |
| 622 | * With clk-bypass case we only need to change parent. |
| 623 | */ |
| 624 | if (index) |
| 625 | ret = omap3_noncore_dpll_set_parent(hw, index); |
| 626 | else |
Tero Kristo | 2e1a7b0 | 2014-10-03 16:57:14 +0300 | [diff] [blame] | 627 | ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate); |
Tero Kristo | d539efa | 2014-10-03 16:57:11 +0300 | [diff] [blame] | 628 | |
| 629 | return ret; |
| 630 | } |
| 631 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 632 | /* DPLL autoidle read/set code */ |
| 633 | |
| 634 | /** |
| 635 | * omap3_dpll_autoidle_read - read a DPLL's autoidle bits |
| 636 | * @clk: struct clk * of the DPLL to read |
| 637 | * |
| 638 | * Return the DPLL's autoidle bits, shifted down to bit 0. Returns |
| 639 | * -EINVAL if passed a null pointer or if the struct clk does not |
| 640 | * appear to refer to a DPLL. |
| 641 | */ |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 642 | static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 643 | { |
| 644 | const struct dpll_data *dd; |
| 645 | u32 v; |
| 646 | |
| 647 | if (!clk || !clk->dpll_data) |
| 648 | return -EINVAL; |
| 649 | |
| 650 | dd = clk->dpll_data; |
| 651 | |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 652 | if (!dd->autoidle_mask) |
Vaibhav Bedia | d76316f | 2012-05-07 23:55:30 -0600 | [diff] [blame] | 653 | return -EINVAL; |
| 654 | |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 655 | v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 656 | v &= dd->autoidle_mask; |
| 657 | v >>= __ffs(dd->autoidle_mask); |
| 658 | |
| 659 | return v; |
| 660 | } |
| 661 | |
| 662 | /** |
| 663 | * omap3_dpll_allow_idle - enable DPLL autoidle bits |
| 664 | * @clk: struct clk * of the DPLL to operate on |
| 665 | * |
| 666 | * Enable DPLL automatic idle control. This automatic idle mode |
| 667 | * switching takes effect only when the DPLL is locked, at least on |
| 668 | * OMAP3430. The DPLL will enter low-power stop when its downstream |
| 669 | * clocks are gated. No return value. |
| 670 | */ |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 671 | static void omap3_dpll_allow_idle(struct clk_hw_omap *clk) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 672 | { |
| 673 | const struct dpll_data *dd; |
| 674 | u32 v; |
| 675 | |
| 676 | if (!clk || !clk->dpll_data) |
| 677 | return; |
| 678 | |
| 679 | dd = clk->dpll_data; |
| 680 | |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 681 | if (!dd->autoidle_mask) |
Vaibhav Bedia | d76316f | 2012-05-07 23:55:30 -0600 | [diff] [blame] | 682 | return; |
Vaibhav Bedia | d76316f | 2012-05-07 23:55:30 -0600 | [diff] [blame] | 683 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 684 | /* |
| 685 | * REVISIT: CORE DPLL can optionally enter low-power bypass |
| 686 | * by writing 0x5 instead of 0x1. Add some mechanism to |
| 687 | * optionally enter this mode. |
| 688 | */ |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 689 | v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 690 | v &= ~dd->autoidle_mask; |
| 691 | v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask); |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 692 | ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 693 | } |
| 694 | |
| 695 | /** |
| 696 | * omap3_dpll_deny_idle - prevent DPLL from automatically idling |
| 697 | * @clk: struct clk * of the DPLL to operate on |
| 698 | * |
| 699 | * Disable DPLL automatic idle control. No return value. |
| 700 | */ |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 701 | static void omap3_dpll_deny_idle(struct clk_hw_omap *clk) |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 702 | { |
| 703 | const struct dpll_data *dd; |
| 704 | u32 v; |
| 705 | |
| 706 | if (!clk || !clk->dpll_data) |
| 707 | return; |
| 708 | |
| 709 | dd = clk->dpll_data; |
| 710 | |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 711 | if (!dd->autoidle_mask) |
Vaibhav Bedia | d76316f | 2012-05-07 23:55:30 -0600 | [diff] [blame] | 712 | return; |
Vaibhav Bedia | d76316f | 2012-05-07 23:55:30 -0600 | [diff] [blame] | 713 | |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 714 | v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 715 | v &= ~dd->autoidle_mask; |
| 716 | v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask); |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 717 | ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 718 | } |
| 719 | |
| 720 | /* Clock control for DPLL outputs */ |
| 721 | |
Tomi Valkeinen | 994c41e | 2014-01-30 13:17:20 +0200 | [diff] [blame] | 722 | /* Find the parent DPLL for the given clkoutx2 clock */ |
| 723 | static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 724 | { |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 725 | struct clk_hw_omap *pclk = NULL; |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 726 | |
| 727 | /* Walk up the parents of clk, looking for a DPLL */ |
| 728 | do { |
| 729 | do { |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 730 | hw = clk_hw_get_parent(hw); |
Tero Kristo | 8aa09cf | 2019-01-15 11:15:14 +0200 | [diff] [blame] | 731 | } while (hw && (!omap2_clk_is_hw_omap(hw))); |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 732 | if (!hw) |
| 733 | break; |
| 734 | pclk = to_clk_hw_omap(hw); |
| 735 | } while (pclk && !pclk->dpll_data); |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 736 | |
Paul Walmsley | a032d33 | 2012-08-03 09:21:10 -0600 | [diff] [blame] | 737 | /* clk does not have a DPLL as a parent? error in the clock data */ |
| 738 | if (!pclk) { |
| 739 | WARN_ON(1); |
Tomi Valkeinen | 994c41e | 2014-01-30 13:17:20 +0200 | [diff] [blame] | 740 | return NULL; |
Paul Walmsley | a032d33 | 2012-08-03 09:21:10 -0600 | [diff] [blame] | 741 | } |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 742 | |
Tomi Valkeinen | 994c41e | 2014-01-30 13:17:20 +0200 | [diff] [blame] | 743 | return pclk; |
| 744 | } |
| 745 | |
| 746 | /** |
| 747 | * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate |
| 748 | * @clk: DPLL output struct clk |
| 749 | * |
| 750 | * Using parent clock DPLL data, look up DPLL state. If locked, set our |
| 751 | * rate to the dpll_clk * 2; otherwise, just use dpll_clk. |
| 752 | */ |
| 753 | unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, |
| 754 | unsigned long parent_rate) |
| 755 | { |
| 756 | const struct dpll_data *dd; |
| 757 | unsigned long rate; |
| 758 | u32 v; |
| 759 | struct clk_hw_omap *pclk = NULL; |
| 760 | |
| 761 | if (!parent_rate) |
| 762 | return 0; |
| 763 | |
| 764 | pclk = omap3_find_clkoutx2_dpll(hw); |
| 765 | |
| 766 | if (!pclk) |
| 767 | return 0; |
| 768 | |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 769 | dd = pclk->dpll_data; |
| 770 | |
| 771 | WARN_ON(!dd->enable_mask); |
| 772 | |
Tero Kristo | 6c0afb5 | 2017-02-09 11:24:37 +0200 | [diff] [blame] | 773 | v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask; |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 774 | v >>= __ffs(dd->enable_mask); |
Richard Woodruff | 358965d | 2010-02-22 22:09:08 -0700 | [diff] [blame] | 775 | if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE)) |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 776 | rate = parent_rate; |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 777 | else |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 778 | rate = parent_rate * 2; |
Rajendra Nayak | a1391d2 | 2009-12-08 18:47:16 -0700 | [diff] [blame] | 779 | return rate; |
| 780 | } |
Vaibhav Hiremath | 353cec4 | 2012-07-05 08:05:15 -0700 | [diff] [blame] | 781 | |
Russ Dill | d6e7bbc | 2018-09-04 12:19:37 +0530 | [diff] [blame] | 782 | /** |
| 783 | * omap3_core_dpll_save_context - Save the m and n values of the divider |
| 784 | * @hw: pointer struct clk_hw |
| 785 | * |
| 786 | * Before the dpll registers are lost save the last rounded rate m and n |
| 787 | * and the enable mask. |
| 788 | */ |
| 789 | int omap3_core_dpll_save_context(struct clk_hw *hw) |
| 790 | { |
| 791 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 792 | struct dpll_data *dd; |
| 793 | u32 v; |
| 794 | |
| 795 | dd = clk->dpll_data; |
| 796 | |
| 797 | v = ti_clk_ll_ops->clk_readl(&dd->control_reg); |
| 798 | clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask); |
| 799 | |
| 800 | if (clk->context == DPLL_LOCKED) { |
| 801 | v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); |
| 802 | dd->last_rounded_m = (v & dd->mult_mask) >> |
| 803 | __ffs(dd->mult_mask); |
| 804 | dd->last_rounded_n = ((v & dd->div1_mask) >> |
| 805 | __ffs(dd->div1_mask)) + 1; |
| 806 | } |
| 807 | |
| 808 | return 0; |
| 809 | } |
| 810 | |
| 811 | /** |
| 812 | * omap3_core_dpll_restore_context - restore the m and n values of the divider |
| 813 | * @hw: pointer struct clk_hw |
| 814 | * |
| 815 | * Restore the last rounded rate m and n |
| 816 | * and the enable mask. |
| 817 | */ |
| 818 | void omap3_core_dpll_restore_context(struct clk_hw *hw) |
| 819 | { |
| 820 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 821 | const struct dpll_data *dd; |
| 822 | u32 v; |
| 823 | |
| 824 | dd = clk->dpll_data; |
| 825 | |
| 826 | if (clk->context == DPLL_LOCKED) { |
| 827 | _omap3_dpll_write_clken(clk, 0x4); |
| 828 | _omap3_wait_dpll_status(clk, 0); |
| 829 | |
| 830 | v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); |
| 831 | v &= ~(dd->mult_mask | dd->div1_mask); |
| 832 | v |= dd->last_rounded_m << __ffs(dd->mult_mask); |
| 833 | v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask); |
| 834 | ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg); |
| 835 | |
| 836 | _omap3_dpll_write_clken(clk, DPLL_LOCKED); |
| 837 | _omap3_wait_dpll_status(clk, 1); |
| 838 | } else { |
| 839 | _omap3_dpll_write_clken(clk, clk->context); |
| 840 | } |
| 841 | } |
| 842 | |
| 843 | /** |
| 844 | * omap3_non_core_dpll_save_context - Save the m and n values of the divider |
| 845 | * @hw: pointer struct clk_hw |
| 846 | * |
| 847 | * Before the dpll registers are lost save the last rounded rate m and n |
| 848 | * and the enable mask. |
| 849 | */ |
| 850 | int omap3_noncore_dpll_save_context(struct clk_hw *hw) |
| 851 | { |
| 852 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 853 | struct dpll_data *dd; |
| 854 | u32 v; |
| 855 | |
| 856 | dd = clk->dpll_data; |
| 857 | |
| 858 | v = ti_clk_ll_ops->clk_readl(&dd->control_reg); |
| 859 | clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask); |
| 860 | |
| 861 | if (clk->context == DPLL_LOCKED) { |
| 862 | v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); |
| 863 | dd->last_rounded_m = (v & dd->mult_mask) >> |
| 864 | __ffs(dd->mult_mask); |
| 865 | dd->last_rounded_n = ((v & dd->div1_mask) >> |
| 866 | __ffs(dd->div1_mask)) + 1; |
| 867 | } |
| 868 | |
| 869 | return 0; |
| 870 | } |
| 871 | |
| 872 | /** |
| 873 | * omap3_core_dpll_restore_context - restore the m and n values of the divider |
| 874 | * @hw: pointer struct clk_hw |
| 875 | * |
| 876 | * Restore the last rounded rate m and n |
| 877 | * and the enable mask. |
| 878 | */ |
| 879 | void omap3_noncore_dpll_restore_context(struct clk_hw *hw) |
| 880 | { |
| 881 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 882 | const struct dpll_data *dd; |
| 883 | u32 ctrl, mult_div1; |
| 884 | |
| 885 | dd = clk->dpll_data; |
| 886 | |
| 887 | ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg); |
| 888 | mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); |
| 889 | |
| 890 | if (clk->context == ((ctrl & dd->enable_mask) >> |
| 891 | __ffs(dd->enable_mask)) && |
| 892 | dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >> |
| 893 | __ffs(dd->mult_mask)) && |
| 894 | dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >> |
| 895 | __ffs(dd->div1_mask)) + 1) { |
| 896 | /* nothing to be done */ |
| 897 | return; |
| 898 | } |
| 899 | |
| 900 | if (clk->context == DPLL_LOCKED) |
| 901 | omap3_noncore_dpll_program(clk, 0); |
| 902 | else |
| 903 | _omap3_dpll_write_clken(clk, clk->context); |
| 904 | } |
| 905 | |
Vaibhav Hiremath | 353cec4 | 2012-07-05 08:05:15 -0700 | [diff] [blame] | 906 | /* OMAP3/4 non-CORE DPLL clkops */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 907 | const struct clk_hw_omap_ops clkhwops_omap3_dpll = { |
| 908 | .allow_idle = omap3_dpll_allow_idle, |
| 909 | .deny_idle = omap3_dpll_deny_idle, |
| 910 | }; |
Tero Kristo | 0565fb1 | 2015-03-03 13:27:48 +0200 | [diff] [blame] | 911 | |
| 912 | /** |
| 913 | * omap3_dpll4_set_rate - set rate for omap3 per-dpll |
| 914 | * @hw: clock to change |
| 915 | * @rate: target rate for clock |
| 916 | * @parent_rate: rate of the parent clock |
| 917 | * |
| 918 | * Check if the current SoC supports the per-dpll reprogram operation |
| 919 | * or not, and then do the rate change if supported. Returns -EINVAL |
| 920 | * if not supported, 0 for success, and potential error codes from the |
| 921 | * clock rate change. |
| 922 | */ |
| 923 | int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate, |
| 924 | unsigned long parent_rate) |
| 925 | { |
| 926 | /* |
| 927 | * According to the 12-5 CDP code from TI, "Limitation 2.5" |
| 928 | * on 3430ES1 prevents us from changing DPLL multipliers or dividers |
| 929 | * on DPLL4. |
| 930 | */ |
| 931 | if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) { |
| 932 | pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n"); |
| 933 | return -EINVAL; |
| 934 | } |
| 935 | |
| 936 | return omap3_noncore_dpll_set_rate(hw, rate, parent_rate); |
| 937 | } |
| 938 | |
| 939 | /** |
| 940 | * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll |
| 941 | * @hw: clock to change |
| 942 | * @rate: target rate for clock |
| 943 | * @parent_rate: rate of the parent clock |
| 944 | * @index: parent index, 0 - reference clock, 1 - bypass clock |
| 945 | * |
| 946 | * Check if the current SoC support the per-dpll reprogram operation |
| 947 | * or not, and then do the rate + parent change if supported. Returns |
| 948 | * -EINVAL if not supported, 0 for success, and potential error codes |
| 949 | * from the clock rate change. |
| 950 | */ |
| 951 | int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, |
| 952 | unsigned long parent_rate, u8 index) |
| 953 | { |
| 954 | if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) { |
| 955 | pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n"); |
| 956 | return -EINVAL; |
| 957 | } |
| 958 | |
| 959 | return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate, |
| 960 | index); |
| 961 | } |
Richard Watts | 035cd48 | 2016-12-02 23:14:38 +0200 | [diff] [blame] | 962 | |
| 963 | /* Apply DM3730 errata sprz319 advisory 2.1. */ |
| 964 | static bool omap3_dpll5_apply_errata(struct clk_hw *hw, |
| 965 | unsigned long parent_rate) |
| 966 | { |
| 967 | struct omap3_dpll5_settings { |
| 968 | unsigned int rate, m, n; |
| 969 | }; |
| 970 | |
| 971 | static const struct omap3_dpll5_settings precomputed[] = { |
| 972 | /* |
| 973 | * From DM3730 errata advisory 2.1, table 35 and 36. |
| 974 | * The N value is increased by 1 compared to the tables as the |
| 975 | * errata lists register values while last_rounded_field is the |
| 976 | * real divider value. |
| 977 | */ |
| 978 | { 12000000, 80, 0 + 1 }, |
| 979 | { 13000000, 443, 5 + 1 }, |
| 980 | { 19200000, 50, 0 + 1 }, |
| 981 | { 26000000, 443, 11 + 1 }, |
| 982 | { 38400000, 25, 0 + 1 } |
| 983 | }; |
| 984 | |
| 985 | const struct omap3_dpll5_settings *d; |
| 986 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
| 987 | struct dpll_data *dd; |
| 988 | unsigned int i; |
| 989 | |
| 990 | for (i = 0; i < ARRAY_SIZE(precomputed); ++i) { |
| 991 | if (parent_rate == precomputed[i].rate) |
| 992 | break; |
| 993 | } |
| 994 | |
| 995 | if (i == ARRAY_SIZE(precomputed)) |
| 996 | return false; |
| 997 | |
| 998 | d = &precomputed[i]; |
| 999 | |
| 1000 | /* Update the M, N and rounded rate values and program the DPLL. */ |
| 1001 | dd = clk->dpll_data; |
| 1002 | dd->last_rounded_m = d->m; |
| 1003 | dd->last_rounded_n = d->n; |
| 1004 | dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n); |
| 1005 | omap3_noncore_dpll_program(clk, 0); |
| 1006 | |
| 1007 | return true; |
| 1008 | } |
| 1009 | |
| 1010 | /** |
| 1011 | * omap3_dpll5_set_rate - set rate for omap3 dpll5 |
| 1012 | * @hw: clock to change |
| 1013 | * @rate: target rate for clock |
| 1014 | * @parent_rate: rate of the parent clock |
| 1015 | * |
| 1016 | * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if |
| 1017 | * the DPLL is used for USB host (detected through the requested rate). |
| 1018 | */ |
| 1019 | int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate, |
| 1020 | unsigned long parent_rate) |
| 1021 | { |
| 1022 | if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) { |
| 1023 | if (omap3_dpll5_apply_errata(hw, parent_rate)) |
| 1024 | return 0; |
| 1025 | } |
| 1026 | |
| 1027 | return omap3_noncore_dpll_set_rate(hw, rate, parent_rate); |
| 1028 | } |