/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H

#include <linux/types.h>

/*
 * The qspinlock word is a single 32-bit atomic_t overlaid with byte/halfword
 * views of its subfields, so the lock/pending/tail parts can be read and
 * written individually without touching the whole word.
 */
typedef struct qspinlock {
	union {
		/* Full 32-bit lock word; all subfields below alias it. */
		atomic_t val;

		/*
		 * By using the whole 2nd least significant byte for the
		 * pending bit, we can allow better optimization of the lock
		 * acquisition for the pending bit holder.
		 */
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;		/* bits 0-7 of val */
			u8	pending;	/* bits 8-15 of val */
		};
		struct {
			u16	locked_pending;	/* locked + pending as one halfword */
			u16	tail;		/* bits 16-31 of val */
		};
#else
		/*
		 * Big-endian: field order is mirrored (and the bytes padded
		 * with 'reserved') so that each named subfield still maps
		 * onto the same bit positions of 'val' as on little-endian,
		 * i.e. 'locked' remains the least significant byte.
		 */
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
} arch_spinlock_t;
| 45 | |
/*
 * Initializer
 *
 * val == 0 means no owner, no pending waiter and an empty tail, i.e. the
 * lock is free (see the bitfield layout documented below).
 */
#define	__ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }

/*
 * Bitfields in the atomic value:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */
/* Build the mask for field 'type' from its _Q_<type>_OFFSET/_Q_<type>_BITS. */
#define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
				      << _Q_ ## type ## _OFFSET)
/* Locked byte: always the 8 least significant bits. */
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

/*
 * Pending field follows the locked byte.  With fewer than 16K CPUs the
 * tail-cpu field fits in 14 bits, leaving room to spend a full byte on
 * 'pending' (enables byte-wide access); otherwise it shrinks to 1 bit.
 */
#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#if CONFIG_NR_CPUS < (1U << 14)
#define _Q_PENDING_BITS		8
#else
#define _Q_PENDING_BITS		1
#endif
#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

/* Tail index: selects one of 4 per-cpu queue nodes. */
#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

/* Tail cpu (+1): occupies all remaining high bits. */
#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

/* Combined tail field = index + cpu, handled as one unit. */
#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

/* Values for setting the locked byte / pending bit directly. */
#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)

#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */