Waiman Long | a33fda3 | 2015-04-24 14:56:30 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Queued spinlock |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by |
| 6 | * the Free Software Foundation; either version 2 of the License, or |
| 7 | * (at your option) any later version. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. |
| 15 | * |
| 16 | * Authors: Waiman Long <waiman.long@hp.com> |
| 17 | */ |
| 18 | #ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H |
| 19 | #define __ASM_GENERIC_QSPINLOCK_TYPES_H |
| 20 | |
/*
 * Including atomic.h with PARAVIRT on will cause compilation errors because
 * of recursive header file inclusion via paravirt_types.h. So don't include
 * it if PARAVIRT is on.
 */
| 26 | #ifndef CONFIG_PARAVIRT |
| 27 | #include <linux/types.h> |
| 28 | #include <linux/atomic.h> |
| 29 | #endif |
| 30 | |
/*
 * The qspinlock word is a single 32-bit atomic_t (val) overlaid, via an
 * anonymous union, with byte and halfword views so that the locked byte,
 * the pending byte and the tail halfword can each be accessed individually.
 * The two #ifdef arms keep those views at the same bit positions within
 * 'val' on little- and big-endian machines.
 */
typedef struct qspinlock {
	union {
		atomic_t val;	/* whole lock word, used for atomic ops */

		/*
		 * By using the whole 2nd least significant byte for the
		 * pending bit, we can allow better optimization of the lock
		 * acquisition for the pending bit holder.
		 */
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;		/* bits  0- 7 of val */
			u8	pending;	/* bits  8-15 of val */
		};
		struct {
			u16	locked_pending;	/* locked + pending as one halfword */
			u16	tail;		/* bits 16-31 of val */
		};
#else
		/* Big endian: reverse field order so bit positions match. */
		struct {
			u16	tail;		/* occupies the high-address bytes */
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];	/* padding over the tail halfword */
			u8	pending;
			u8	locked;
		};
#endif
	};
} arch_spinlock_t;
| 62 | |
/*
 * Initializer: an all-zero lock word means not locked, no pending
 * waiter and an empty MCS tail (all union views read as zero).
 */
#define	__ARCH_SPIN_LOCK_UNLOCKED	{ .val = ATOMIC_INIT(0) }
Dan Streetman | b82e530 | 2016-02-19 13:49:27 -0500 | [diff] [blame] | 67 | |
/*
 * Bitfields in the atomic value:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */

/* Build a mask of _Q_<type>_BITS ones starting at _Q_<type>_OFFSET. */
#define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
				      << _Q_ ## type ## _OFFSET)
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
/*
 * With fewer than 16K CPUs the pending field can afford a whole byte
 * (matching the u8 'pending' view in the struct); otherwise it shrinks
 * to a single bit so the tail cpu field gains enough bits to encode
 * every CPU (+1).
 */
#if CONFIG_NR_CPUS < (1U << 14)
#define _Q_PENDING_BITS		8
#else
#define _Q_PENDING_BITS		1
#endif
#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

/* Tail = { idx, cpu }: which per-CPU MCS node slot, and for which CPU. */
#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)	/* all remaining bits */
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

/* The whole tail (idx + cpu) as one field. */
#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)

#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */