blob: 2fd1fb89ec366ddd51fbf4071e726ae35977cdac [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H

#include <linux/types.h>

14typedef struct qspinlock {
Will Deacon625e88b2018-04-26 11:34:16 +010015 union {
16 atomic_t val;
17
18 /*
19 * By using the whole 2nd least significant byte for the
20 * pending bit, we can allow better optimization of the lock
21 * acquisition for the pending bit holder.
22 */
23#ifdef __LITTLE_ENDIAN
24 struct {
25 u8 locked;
26 u8 pending;
27 };
28 struct {
29 u16 locked_pending;
30 u16 tail;
31 };
32#else
33 struct {
34 u16 tail;
35 u16 locked_pending;
36 };
37 struct {
38 u8 reserved[2];
39 u8 pending;
40 u8 locked;
41 };
42#endif
43 };
Waiman Longa33fda32015-04-24 14:56:30 -040044} arch_spinlock_t;
45
46/*
Dan Streetmanb82e5302016-02-19 13:49:27 -050047 * Initializier
48 */
Steven Rostedt (VMware)6cc65be2018-06-21 20:35:26 -040049#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
Dan Streetmanb82e5302016-02-19 13:49:27 -050050
51/*
Waiman Longa33fda32015-04-24 14:56:30 -040052 * Bitfields in the atomic value:
53 *
Peter Zijlstra (Intel)69f9cae2015-04-24 14:56:34 -040054 * When NR_CPUS < 16K
55 * 0- 7: locked byte
56 * 8: pending
57 * 9-15: not used
58 * 16-17: tail index
59 * 18-31: tail cpu (+1)
60 *
61 * When NR_CPUS >= 16K
Waiman Longa33fda32015-04-24 14:56:30 -040062 * 0- 7: locked byte
Peter Zijlstra (Intel)c1fb1592015-04-24 14:56:32 -040063 * 8: pending
64 * 9-10: tail index
65 * 11-31: tail cpu (+1)
Waiman Longa33fda32015-04-24 14:56:30 -040066 */
67#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
68 << _Q_ ## type ## _OFFSET)
69#define _Q_LOCKED_OFFSET 0
70#define _Q_LOCKED_BITS 8
71#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
72
Peter Zijlstra (Intel)c1fb1592015-04-24 14:56:32 -040073#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
Peter Zijlstra (Intel)69f9cae2015-04-24 14:56:34 -040074#if CONFIG_NR_CPUS < (1U << 14)
75#define _Q_PENDING_BITS 8
76#else
Peter Zijlstra (Intel)c1fb1592015-04-24 14:56:32 -040077#define _Q_PENDING_BITS 1
Peter Zijlstra (Intel)69f9cae2015-04-24 14:56:34 -040078#endif
Peter Zijlstra (Intel)c1fb1592015-04-24 14:56:32 -040079#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
80
81#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
Waiman Longa33fda32015-04-24 14:56:30 -040082#define _Q_TAIL_IDX_BITS 2
83#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
84
85#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
86#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
87#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
88
Peter Zijlstra (Intel)69f9cae2015-04-24 14:56:34 -040089#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
Waiman Long6403bd72015-04-24 14:56:33 -040090#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
91
Waiman Longa33fda32015-04-24 14:56:30 -040092#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
Peter Zijlstra (Intel)c1fb1592015-04-24 14:56:32 -040093#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
Waiman Longa33fda32015-04-24 14:56:30 -040094
95#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */