#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
#define __LINUX_SPINLOCK_TYPES_RAW_H

#include <linux/types.h>

#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
# include <linux/spinlock_types_up.h>
#endif

#include <linux/lockdep_types.h>
| |
/*
 * Raw spinlock type (lockdep inner wait type LD_WAIT_SPIN, see
 * RAW_SPIN_DEP_MAP_INIT() below).  Wraps the architecture-provided
 * arch_spinlock_t and adds optional debug / lockdep state depending
 * on kernel configuration.
 */
typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	/* arch-specific lock word */
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Debug state; initial values come from SPIN_DEBUG_INIT() below. */
	unsigned int magic, owner_cpu;	/* magic = SPINLOCK_MAGIC; owner_cpu starts at -1 */
	void *owner;			/* starts at SPINLOCK_OWNER_INIT (no owner) */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep dependency-tracking state */
#endif
} raw_spinlock_t;
| |
/* Sanity value stored in raw_spinlock.magic by SPIN_DEBUG_INIT(). */
#define SPINLOCK_MAGIC 0xdead4ead

/* raw_spinlock.owner value for a lock with no owner (see SPIN_DEBUG_INIT()). */
#define SPINLOCK_OWNER_INIT ((void *)-1L)
| |
/*
 * Designated-initializer fragments for the optional ->dep_map member.
 *
 * Each variant selects a lockdep inner wait type:
 *   RAW_SPIN_DEP_MAP_INIT()   - LD_WAIT_SPIN
 *   SPIN_DEP_MAP_INIT()       - LD_WAIT_CONFIG
 *   LOCAL_SPIN_DEP_MAP_INIT() - LD_WAIT_CONFIG, plus LD_LOCK_PERCPU lock type
 *
 * Without CONFIG_DEBUG_LOCK_ALLOC these expand to nothing, so the
 * surrounding initializer simply omits the (then absent) dep_map member.
 * Note: no trailing comma — the caller's initializer supplies layout.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RAW_SPIN_DEP_MAP_INIT(lockname)	\
	.dep_map = {				\
		.name = #lockname,		\
		.wait_type_inner = LD_WAIT_SPIN,	\
	}
# define SPIN_DEP_MAP_INIT(lockname)		\
	.dep_map = {				\
		.name = #lockname,		\
		.wait_type_inner = LD_WAIT_CONFIG,	\
	}

# define LOCAL_SPIN_DEP_MAP_INIT(lockname)	\
	.dep_map = {				\
		.name = #lockname,		\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,	\
	}
#else
# define RAW_SPIN_DEP_MAP_INIT(lockname)
# define SPIN_DEP_MAP_INIT(lockname)
# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
#endif
| |
/*
 * Initializer fragment for the CONFIG_DEBUG_SPINLOCK fields: marks the
 * lock as valid (SPINLOCK_MAGIC) and ownerless.  The trailing comma after
 * .owner is deliberate — this macro is pasted mid-initializer, directly
 * before a *_DEP_MAP_INIT() fragment (see __RAW_SPIN_LOCK_INITIALIZER()).
 * Expands to nothing when debugging is disabled.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
# define SPIN_DEBUG_INIT(lockname)		\
	.magic = SPINLOCK_MAGIC,		\
	.owner_cpu = -1,			\
	.owner = SPINLOCK_OWNER_INIT,
#else
# define SPIN_DEBUG_INIT(lockname)
#endif
| |
/*
 * Static initializer (brace form) for a raw_spinlock_t in the unlocked
 * state.  SPIN_DEBUG_INIT() supplies its own trailing comma — or nothing
 * at all — so no comma is written between the two macro fragments here.
 */
#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
	{					\
	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
	SPIN_DEBUG_INIT(lockname)		\
	RAW_SPIN_DEP_MAP_INIT(lockname) }

/* Same initializer as a compound literal, usable as an rvalue. */
#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)

/* Define and statically initialize an unlocked raw spinlock named 'x'. */
#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
| |
#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */