/*
 *  linux/arch/arm/kernel/smp_twd.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/smp_twd.h>

/* set up by the platform code */
static void __iomem *twd_base;

static struct clk *twd_clk;
static unsigned long twd_timer_rate;
static DEFINE_PER_CPU(bool, percpu_setup_called);

static struct clock_event_device __percpu *twd_evt;
static unsigned int twd_features =
		CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
static int twd_ppi;

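/*
 * Shut the timer down by clearing its control register; used for both the
 * clockevents shutdown and tick_resume states.
 */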
static int twd_shutdown(struct clock_event_device *clk)
{
	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
	return 0;
}

static int twd_set_oneshot(struct clock_event_device *clk)
{
	/* period set, and timer enabled in 'next_event' hook */
	writel_relaxed(TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT,
		       twd_base + TWD_TIMER_CONTROL);
	return 0;
}

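/*
 * Program periodic mode: load the timer with one tick's worth of counts
 * (twd_timer_rate / HZ) and enable it with auto-reload and interrupts.
 */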
static int twd_set_periodic(struct clock_event_device *clk)
{
	unsigned long ctrl = TWD_TIMER_CONTROL_ENABLE |
			     TWD_TIMER_CONTROL_IT_ENABLE |
			     TWD_TIMER_CONTROL_PERIODIC;

	writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
		       twd_base + TWD_TIMER_LOAD);
	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
	return 0;
}

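/*
 * Program a one-shot expiry 'evt' timer ticks from now by writing the
 * counter directly and (re-)enabling the timer.
 */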
static int twd_set_next_event(unsigned long evt,
			struct clock_event_device *unused)
{
	unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);

	ctrl |= TWD_TIMER_CONTROL_ENABLE;

	writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);

	return 0;
}

/*
 * twd_timer_ack: checks for a local timer interrupt.
 *
 * If a local timer interrupt has occurred, acknowledge and return 1.
 * Otherwise, return 0.
 */
static int twd_timer_ack(void)
{
	if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
		writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
		return 1;
	}

	return 0;
}

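/*
 * Called on the dying CPU: shut down its local timer and mask the
 * per-cpu interrupt.
 */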
static void twd_timer_stop(void)
{
	struct clock_event_device *clk = raw_cpu_ptr(twd_evt);

	twd_shutdown(clk);
	disable_percpu_irq(clk->irq);
}

#ifdef CONFIG_COMMON_CLK

/*
 * Updates clockevent frequency when the cpu frequency changes.
 * Called on the cpu that is changing frequency with interrupts disabled.
 */
static void twd_update_frequency(void *new_rate)
{
	twd_timer_rate = *((unsigned long *) new_rate);

	clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
}

static int twd_rate_change(struct notifier_block *nb,
	unsigned long flags, void *data)
{
	struct clk_notifier_data *cnd = data;

	/*
	 * The twd clock events must be reprogrammed to account for the new
	 * frequency.  The timer is local to a cpu, so cross-call to the
	 * changing cpu.
	 */
	if (flags == POST_RATE_CHANGE)
		on_each_cpu(twd_update_frequency,
			    (void *)&cnd->new_rate, 1);

	return NOTIFY_OK;
}

static struct notifier_block twd_clk_nb = {
	.notifier_call = twd_rate_change,
};

static int twd_clk_init(void)
{
	if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
		return clk_notifier_register(twd_clk, &twd_clk_nb);

	return 0;
}
core_initcall(twd_clk_init);

#elif defined (CONFIG_CPU_FREQ)

#include <linux/cpufreq.h>

/*
 * Updates clockevent frequency when the cpu frequency changes.
 * Called on the cpu that is changing frequency with interrupts disabled.
 */
static void twd_update_frequency(void *data)
{
	twd_timer_rate = clk_get_rate(twd_clk);

	clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
}

static int twd_cpufreq_transition(struct notifier_block *nb,
	unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/*
	 * The twd clock events must be reprogrammed to account for the new
	 * frequency.  The timer is local to a cpu, so cross-call to the
	 * changing cpu.
	 */
	if (state == CPUFREQ_POSTCHANGE)
		smp_call_function_single(freqs->cpu, twd_update_frequency,
					 NULL, 1);

	return NOTIFY_OK;
}

static struct notifier_block twd_cpufreq_nb = {
	.notifier_call = twd_cpufreq_transition,
};

static int twd_cpufreq_init(void)
{
	if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
		return cpufreq_register_notifier(&twd_cpufreq_nb,
						 CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}
core_initcall(twd_cpufreq_init);

#endif

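/*
 * Fall-back calibration when no clock rate was provided: let the timer
 * free-run for five jiffies and derive the tick rate from the counter delta.
 */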
static void twd_calibrate_rate(void)
{
	unsigned long count;
	u64 waitjiffies;

	/*
	 * If this is the first time round, we need to work out how fast
	 * the timer ticks
	 */
	if (twd_timer_rate == 0) {
		pr_info("Calibrating local timer... ");

		/* Wait for a tick to start */
		waitjiffies = get_jiffies_64() + 1;

		while (get_jiffies_64() < waitjiffies)
			udelay(10);

		/* OK, now the tick has started, let's get the timer going */
		waitjiffies += 5;

		/* enable, no interrupt or reload */
		writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);

		/* maximum value */
		writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);

		while (get_jiffies_64() < waitjiffies)
			udelay(10);

		count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);

		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);

		pr_cont("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
			(twd_timer_rate / 10000) % 100);
	}
}

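/*
 * Per-cpu timer interrupt handler: acknowledge the TWD interrupt and
 * forward the event to the registered clockevent handler.
 */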
static irqreturn_t twd_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	if (twd_timer_ack()) {
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

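/*
 * Look up the TWD input clock (from the device tree node if available,
 * otherwise via the "smp_twd" clkdev entry), enable it and record its rate.
 */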
static void twd_get_clock(struct device_node *np)
{
	int err;

	if (np)
		twd_clk = of_clk_get(np, 0);
	else
		twd_clk = clk_get_sys("smp_twd", NULL);

	if (IS_ERR(twd_clk)) {
		pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
		return;
	}

	err = clk_prepare_enable(twd_clk);
	if (err) {
		pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
		clk_put(twd_clk);
		return;
	}

	twd_timer_rate = clk_get_rate(twd_clk);
}

/*
 * Setup the local clock events for a CPU.
 */
static void twd_timer_setup(void)
{
	struct clock_event_device *clk = raw_cpu_ptr(twd_evt);
	int cpu = smp_processor_id();

	/*
	 * If the basic setup for this CPU has been done before don't
	 * bother with the below.
	 */
	if (per_cpu(percpu_setup_called, cpu)) {
		writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
		clockevents_register_device(clk);
		enable_percpu_irq(clk->irq, 0);
		return;
	}
	per_cpu(percpu_setup_called, cpu) = true;

	twd_calibrate_rate();

	/*
	 * The following is done once per CPU the first time .setup() is
	 * called.
	 */
	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);

	clk->name = "local_timer";
	clk->features = twd_features;
	clk->rating = 350;
	clk->set_state_shutdown = twd_shutdown;
	clk->set_state_periodic = twd_set_periodic;
	clk->set_state_oneshot = twd_set_oneshot;
	clk->tick_resume = twd_shutdown;
	clk->set_next_event = twd_set_next_event;
	clk->irq = twd_ppi;
	clk->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);
	enable_percpu_irq(clk->irq, 0);
}

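/*
 * CPU hotplug notifier: bring up the local timer on CPU_STARTING and
 * stop it again on CPU_DYING.
 */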
static int twd_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		twd_timer_setup();
		break;
	case CPU_DYING:
		twd_timer_stop();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block twd_timer_cpu_nb = {
	.notifier_call = twd_timer_cpu_notify,
};

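/*
 * Common registration path: allocate the per-cpu clockevent devices,
 * request the per-cpu interrupt, hook into CPU hotplug and pick up the
 * clock rate (falling back to calibration if none is available).
 */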
static int __init twd_local_timer_common_register(struct device_node *np)
{
	int err;

	twd_evt = alloc_percpu(struct clock_event_device);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&twd_timer_cpu_nb);
	if (err)
		goto out_irq;

	twd_get_clock(np);
	if (!of_property_read_bool(np, "always-on"))
		twd_features |= CLOCK_EVT_FEAT_C3STOP;

	/*
	 * Immediately configure the timer on the boot CPU, unless we need
	 * jiffies to be incrementing to calibrate the rate in which case
	 * setup the timer in late_time_init.
	 */
	if (twd_timer_rate)
		twd_timer_setup();
	else
		late_time_init = twd_timer_setup;

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}

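/*
 * Non-DT registration entry point: the platform supplies the TWD MMIO
 * region and PPI through a struct twd_local_timer.
 */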
int __init twd_local_timer_register(struct twd_local_timer *tlt)
{
	if (twd_base || twd_evt)
		return -EBUSY;

	twd_ppi = tlt->res[1].start;

	twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
	if (!twd_base)
		return -ENOMEM;

	return twd_local_timer_common_register(NULL);
}

#ifdef CONFIG_OF
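/*
 * DT registration path: parse the PPI and map the registers from the
 * device node, then fall through to the common registration code.
 */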
static int __init twd_local_timer_of_register(struct device_node *np)
{
	int err;

	twd_ppi = irq_of_parse_and_map(np, 0);
	if (!twd_ppi) {
		err = -EINVAL;
		goto out;
	}

	twd_base = of_iomap(np, 0);
	if (!twd_base) {
		err = -ENOMEM;
		goto out;
	}

	err = twd_local_timer_common_register(np);

out:
	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
	return err;
}
CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
#endif