/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
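/*
 * Each read-modify-write helper below is a load-linked/store-conditional
 * loop: l.lwa loads v->counter and places a reservation on the word,
 * l.swa stores the updated value only if that reservation is still intact
 * and records the outcome in the flag bit, and l.bnf ("branch if no flag")
 * retries from label 1 when a concurrent writer won the race.  The l.nop
 * fills the branch delay slot.  The "cc" clobber covers the flag bit and
 * "memory" keeps the compiler from moving memory accesses across the loop.
 */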
15#define ATOMIC_OP(op) \
Mark Rutland3f1e9312021-05-25 15:02:24 +010016static inline void arch_atomic_##op(int i, atomic_t *v) \
Stefan Kristianssonbc195982014-05-13 22:30:56 +030017{ \
18 int tmp; \
19 \
20 __asm__ __volatile__( \
21 "1: l.lwa %0,0(%1) \n" \
22 " l." #op " %0,%0,%2 \n" \
23 " l.swa 0(%1),%0 \n" \
24 " l.bnf 1b \n" \
25 " l.nop \n" \
26 : "=&r"(tmp) \
27 : "r"(&v->counter), "r"(i) \
28 : "cc", "memory"); \
29}
30
/* Atomically perform op with v->counter and i, return the new value */
#define ATOMIC_OP_RETURN(op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	int tmp; \
	\
	__asm__ __volatile__( \
		"1:	l.lwa	%0,0(%1)	\n" \
		"	l." #op " %0,%0,%2	\n" \
		"	l.swa	0(%1),%0	\n" \
		"	l.bnf	1b		\n" \
		"	 l.nop			\n" \
		: "=&r"(tmp) \
		: "r"(&v->counter), "r"(i) \
		: "cc", "memory"); \
	\
	return tmp; \
}

/* Atomically perform op with v->counter and i, return orig v->counter */
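/*
 * The fetch variants need two scratch registers: %0 (old) keeps the value
 * originally loaded from v->counter, while %1 (tmp) holds the updated value
 * that l.swa writes back, so the pre-op value can be handed to the caller.
 */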
#define ATOMIC_FETCH_OP(op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int tmp, old; \
	\
	__asm__ __volatile__( \
		"1:	l.lwa	%0,0(%2)	\n" \
		"	l." #op " %1,%0,%3	\n" \
		"	l.swa	0(%2),%1	\n" \
		"	l.bnf	1b		\n" \
		"	 l.nop			\n" \
		: "=&r"(old), "=&r"(tmp) \
		: "r"(&v->counter), "r"(i) \
		: "cc", "memory"); \
	\
	return old; \
}

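/*
 * The kernel's atomic API only defines *_return forms for add and sub;
 * the bitwise ops come in plain and fetch flavours only, which is why
 * ATOMIC_OP_RETURN is instantiated for just add/sub below.
 */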
ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

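/*
 * Advertise the operations implemented above so the generic atomic
 * headers use them directly instead of generating fallbacks.
 */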
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#define arch_atomic_add arch_atomic_add
#define arch_atomic_sub arch_atomic_sub
#define arch_atomic_and arch_atomic_and
#define arch_atomic_or arch_atomic_or
#define arch_atomic_xor arch_atomic_xor

/*
 * Atomically add a to v->counter as long as v is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
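/*
 * l.sfeq sets the flag when the loaded value already equals u, and l.bf
 * then skips the store by branching to label 2.  The l.add sits in the
 * branch delay slot, so it runs on both paths, but its result is simply
 * discarded on the early exit.  The generic layer builds
 * atomic_inc_not_zero(v) on top of this as (roughly)
 * arch_atomic_fetch_add_unless(v, 1, 0) != 0.
 */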
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%2)		\n"
		"	l.sfeq %0, %4		\n"
		"	l.bf 2f			\n"
		"	 l.add %1, %0, %3	\n"
		"	l.swa 0(%2), %1		\n"
		"	l.bnf 1b		\n"
		"	 l.nop			\n"
		"2:				\n"
		: "=&r"(old), "=&r" (tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

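/*
 * Plain, non-RMW accessors: READ_ONCE/WRITE_ONCE keep the compiler from
 * tearing the access or re-reading/caching the value.
 */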
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))

#include <asm/cmpxchg.h>

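/*
 * arch_xchg/arch_cmpxchg come from asm/cmpxchg.h, which on this
 * architecture is likewise built on l.lwa/l.swa; here they are simply
 * pointed at the counter word.
 */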
#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))

#endif /* __ASM_OPENRISC_ATOMIC_H */