// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

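/*
 * Map an atomic64_t's address to one of the NR_LOCKS spinlocks.
 * Dropping the low L1_CACHE_SHIFT bits and XOR-folding the higher
 * bits spreads variables across the lock array; the final mask is
 * valid because NR_LOCKS is a power of two.
 */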
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
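/*
 * Every operation below takes the hashed lock with interrupts
 * disabled, so a 64-bit access cannot be torn by another CPU or by
 * an interrupt handler on the same CPU.
 */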
s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);

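/*
 * The macros below stamp out one locked read-modify-write routine per
 * operator.  As an illustration, ATOMIC64_OP(add, +=) expands to:
 *
 *	void generic_atomic64_add(s64 a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		v->counter += a;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 *	EXPORT_SYMBOL(generic_atomic64_add);
 */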
#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

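/*
 * Unlike the ##op##_return variants above, which return the new value,
 * the fetch_##op variants return the value the counter held before the
 * operation was applied.
 */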
#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

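/*
 * Decrement only if the result stays non-negative.  The decremented
 * value is returned whether or not it was stored, so a negative return
 * means the counter was left untouched.
 */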
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);

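/*
 * Store @n only if the counter currently equals @o.  The old value is
 * returned either way, so callers can tell whether the swap happened
 * by comparing the return value against @o.
 */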
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);

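/*
 * Unconditionally replace the counter with @new and return the value
 * it held beforehand.
 */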
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

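/*
 * Add @a to the counter unless it currently equals @u, returning the
 * old value in either case.
 */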
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
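
/*
 * Callers don't normally use these helpers directly: an architecture
 * lacking native 64-bit atomics selects CONFIG_GENERIC_ATOMIC64, and
 * <asm-generic/atomic64.h> wires the atomic64 API up to the
 * generic_atomic64_*() implementations above, along the lines of
 * (illustrative sketch, not the verbatim header):
 *
 *	#define arch_atomic64_read	generic_atomic64_read
 *	#define arch_atomic64_set	generic_atomic64_set
 *	#define arch_atomic64_add	generic_atomic64_add
 */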