/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IRQFLAGS_H
#define _ASM_TILE_IRQFLAGS_H

#include <arch/interrupts.h>
#include <arch/chip.h>

/*
 * The set of interrupts we want to allow when interrupts are nominally
 * disabled.  The remainder are effectively "NMI" interrupts from
 * the point of view of the generic Linux code.  Note that synchronous
 * interrupts (aka "non-queued") are not blocked by the mask in any case.
 */
#define LINUX_MASKABLE_INTERRUPTS \
	(~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
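
/*
 * A worked illustration (the interrupt numbers here are hypothetical):
 * if INT_PERF_COUNT were 33 and INT_AUX_PERF_COUNT were 49, this mask
 * would be all ones except bits 33 and 49, so writing it to the
 * interrupt-mask SPR blocks everything Linux treats as maskable while
 * leaving the two perf-count "NMIs" deliverable.
 */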

#if CHIP_HAS_SPLIT_INTR_MASK()
/* The same macro, but for the two 32-bit SPRs separately. */
#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
#define LINUX_MASKABLE_INTERRUPTS_HI \
	(~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
#endif

#ifndef __ASSEMBLY__

/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
#include <asm/percpu.h>
#include <arch/spr_def.h>

/*
 * Set and clear kernel interrupt masks.
 *
 * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
 * clobber.  We rely on it being equivalent to a compiler barrier in
 * this code since arch_local_irq_save() and friends must act as
 * compiler barriers.  This compiler semantic is baked into enough
 * places that the compiler will maintain it going forward.
 */
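/*
 * A minimal sketch of the ordering this buys us (some_flag is a
 * hypothetical per-cpu variable):
 *
 *	interrupt_mask_set_mask(-1ULL);    an __insn_mtspr underneath
 *	this_cpu_write(some_flag, 1);      must not be hoisted above it
 *
 * The memory clobber keeps the compiler from moving the store above
 * the SPR write, which is exactly what an irq-disable critical
 * section requires.
 */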
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
#endif
#define interrupt_mask_set(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
	int __n = (n); \
	(((__n < 32) ? \
	  __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
	  __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
	  >> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_save_mask() \
	(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
	 (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
#define interrupt_mask_restore_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#define interrupt_mask_save_mask() \
	__insn_mfspr(SPR_INTERRUPT_MASK_K)
#define interrupt_mask_restore_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
#endif
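
/*
 * A usage sketch for the helpers above (INT_UDN_FIREWALL is used
 * purely as an illustrative interrupt number):
 *
 *	interrupt_mask_set(INT_UDN_FIREWALL);     block one interrupt
 *	if (interrupt_mask_check(INT_UDN_FIREWALL))
 *		...;                              it now reads as masked
 *	interrupt_mask_reset(INT_UDN_FIREWALL);   unblock it again
 *
 * On CHIP_HAS_SPLIT_INTR_MASK() chips the same calls transparently
 * pick the low or high 32-bit SPR based on the interrupt number.
 */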

/*
 * The set of interrupts we want active if irqs are enabled.
 * Note that in particular, the tile timer interrupt comes and goes
 * from this set, since we have no other way to turn off the timer.
 * Likewise, INTCTRL_K is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
 * is always claimed as an "active interrupt" so we can query that bit
 * to know our current state.
 */
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
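
/*
 * Sketch of the sentinel in action: since INT_MEM_ERROR always stays
 * in interrupts_enabled_mask, enabling irqs always unmasks it and
 * disabling irqs always masks it, so
 *
 *	interrupt_mask_check(INT_MEM_ERROR)
 *
 * reads back our logical irq-disabled state (see arch_irqs_disabled()
 * below) without needing a separate flag word.
 */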

#ifdef CONFIG_DEBUG_PREEMPT
/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#endif

/* Disable interrupts. */
#define arch_local_irq_disable() \
	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
	interrupt_mask_set_mask(-1ULL)

/*
 * Read the set of maskable interrupts.
 * We avoid the preemption warning here via raw_cpu_ptr since even
 * if irqs are already enabled, it's harmless to read the wrong cpu's
 * enabled mask.
 */
#define arch_local_irqs_enabled() \
	(*raw_cpu_ptr(&interrupts_enabled_mask))

/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
	interrupt_mask_reset_mask(arch_local_irqs_enabled())

/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \
	if (disabled) \
		arch_local_irq_disable(); \
	else \
		arch_local_irq_enable(); \
} while (0)

/* Return true if "flags" argument means interrupts are disabled. */
#define arch_irqs_disabled_flags(flags) ((flags) != 0)

/* Return true if interrupts are currently disabled. */
#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)

/* Save whether interrupts are currently disabled. */
#define arch_local_save_flags() arch_irqs_disabled()

/* Save whether interrupts are currently disabled, then disable them. */
#define arch_local_irq_save() ({ \
	unsigned long __flags = arch_local_save_flags(); \
	arch_local_irq_disable(); \
	__flags; })
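
/*
 * The canonical pairing, as a sketch:
 *
 *	unsigned long flags = arch_local_irq_save();
 *	... critical section: maskable interrupts blocked ...
 *	arch_local_irq_restore(flags);
 *
 * Since arch_local_save_flags() is just the boolean disabled state,
 * "flags" here is 0 or 1 rather than a saved copy of the mask.
 */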

/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
	this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))

/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
	arch_local_irq_mask(interrupt); \
	interrupt_mask_set(interrupt); \
} while (0)

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
	this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
	arch_local_irq_unmask(interrupt); \
	if (!irqs_disabled()) \
		interrupt_mask_reset(interrupt); \
} while (0)
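
/*
 * Typical use, sketched (INT_TILE_TIMER is assumed as the name of the
 * tile timer interrupt discussed above; the one-shot pattern is
 * illustrative):
 *
 *	arch_local_irq_mask_now(INT_TILE_TIMER);     handler: silence
 *						     the timer at once
 *	arch_local_irq_unmask_now(INT_TILE_TIMER);   re-arm: deliverable
 *						     again if irqs allow
 */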

#else /* __ASSEMBLY__ */

/* We provide a somewhat more restricted set for assembly. */

#ifdef __tilegx__

#if INT_MEM_ERROR != 0
# error Fix IRQS_DISABLED() macro
#endif

/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp) \
	mfspr   tmp, SPR_INTERRUPT_MASK_K; \
	andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
	moveli reg, hw2_last(interrupts_enabled_mask); \
	shl16insli reg, reg, hw1(interrupts_enabled_mask); \
	shl16insli reg, reg, hw0(interrupts_enabled_mask); \
	add     reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
	moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0

/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
	movei   tmp, -1; \
	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp

/* Enable interrupts. */
#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
	ld      tmp0, tmp0
#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0

#else /* !__tilegx__ */

/*
 * Return 0 or 1 to indicate whether interrupts are currently disabled.
 * Note that it's important that we use a bit from the "low" mask word,
 * since when we are enabling, that is the word we write first, so if we
 * are interrupted after only writing half of the mask, the interrupt
 * handler will correctly observe that we have interrupts enabled, and
 * will enable interrupts itself on return from the interrupt handler
 * (making the original code's write of the "high" mask word idempotent).
 */
#define IRQS_DISABLED(tmp) \
	mfspr   tmp, SPR_INTERRUPT_MASK_K_0; \
	shri    tmp, tmp, INT_MEM_ERROR; \
	andi    tmp, tmp, 1
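
/*
 * A concrete walk-through of the scenario above, as a sketch: if
 * IRQ_ENABLE is interrupted between its two mtspr writes, the
 * handler's IRQS_DISABLED test reads MASK_K_0, sees INT_MEM_ERROR
 * already unmasked, concludes irqs were enabled, and re-enables both
 * mask words itself on return; the interrupted code's remaining write
 * of the "high" word then merely repeats work already done.
 */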

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
	moveli  reg, lo16(interrupts_enabled_mask); \
	auli    reg, reg, ha16(interrupts_enabled_mask); \
	add     reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
	{ \
	 movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
	}; \
	{ \
	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
	}; \
	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1

/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
	movei   tmp, -1; \
	mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp; \
	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
	{ \
	 lw     tmp0, tmp0; \
	 addi   tmp1, tmp0, 4 \
	}; \
	lw      tmp1, tmp1
#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif

#define IRQ_ENABLE(tmp0, tmp1) \
	IRQ_ENABLE_LOAD(tmp0, tmp1); \
	IRQ_ENABLE_APPLY(tmp0, tmp1)
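
/*
 * The LOAD/APPLY split is presumably a scheduling aid: an assembly
 * caller can issue IRQ_ENABLE_LOAD early to hide the load latency
 * behind other work and run IRQ_ENABLE_APPLY at the last moment.
 * A sketch (register names illustrative):
 *
 *	IRQ_ENABLE_LOAD(r20, r21)
 *	... unrelated instructions ...
 *	IRQ_ENABLE_APPLY(r20, r21)
 */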

/*
 * Do the CPU's IRQ-state tracing from assembly code.  We call a
 * C function, but almost everywhere we do, we don't mind clobbering
 * all the caller-saved registers.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON  jal trace_hardirqs_on
# define TRACE_IRQS_OFF jal trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_IRQFLAGS_H */