/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;

#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			void *freelist;		/* first free object */
			union {
				unsigned long counters;
				struct {
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
#endif
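
/*
 * Illustrative sketch (not part of the original header): the alignment
 * static_assert above exists because, with CONFIG_HAVE_CMPXCHG_DOUBLE, SLUB
 * updates the freelist/counters pair as a single double-word, roughly:
 *
 *	cmpxchg_double(&slab->freelist, &slab->counters,
 *		       old_freelist, old_counters,
 *		       new_freelist, new_counters);
 *
 * The actual call sites live in mm/slub.c; this is only a hedged sketch of
 * why freelist must sit on a 2*sizeof(void *) boundary.
 */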

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page.
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
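
/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * four helpers above are just type-checked casts, so a typical round trip
 * looks like:
 *
 *	struct slab *slab = folio_slab(folio);	(folio_test_slab() known true)
 *	struct folio *back = slab_folio(slab);	(back == folio)
 *	struct page *head = slab_page(slab);	(first struct page of the folio)
 */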

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}
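
/*
 * Illustrative only (a hedged sketch, not from the original header): an
 * allocator typically records the pfmemalloc state right after obtaining the
 * pages backing a new slab, roughly:
 *
 *	if (page_is_pfmemalloc(slab_page(slab)))
 *		slab_set_pfmemalloc(slab);
 */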

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
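
/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * address/size helpers above compose naturally, e.g. to sanity-check an
 * object's byte offset within its slab:
 *
 *	struct slab *slab = virt_to_slab(obj);
 *
 *	if (slab) {
 *		size_t offset = (char *)obj - (char *)slab_address(slab);
 *
 *		WARN_ON(offset >= slab_size(slab));
 *	}
 */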

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
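
/*
 * Illustrative only (a hedged sketch, not from the original header): early
 * boot code typically gates slab use on this state, much as
 * slab_is_available() does by checking slab_state >= UP:
 *
 *	if (slab_state >= UP)
 *		ptr = kmalloc(size, GFP_KERNEL);
 *	else
 *		ptr = memblock_alloc(size, SMP_CACHE_BYTES);
 */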

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
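
/*
 * Illustrative only (a hedged sketch, not taken verbatim from mm/slub.c): a
 * typical caller tests one parsed debug flag at a time; the identifiers
 * set_track()/TRACK_ALLOC are internal to mm/slub.c and shown purely as an
 * assumption:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		set_track(s, object, TRACK_ALLOC, addr);
 */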

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
			     gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
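
/*
 * Illustrative arithmetic (an assumed example, not part of the original
 * header): for a cache with s->size == 64 on a 64-bit kernel, each accounted
 * object is charged 64 + sizeof(struct obj_cgroup *) == 72 bytes.
 */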

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
						     false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					   struct kmem_cache *s, gfp_t gfp,
					   bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For a kmalloc object, the allocated memory size (object_size) is
	 * likely larger than the requested size (orig_size). If the redzone
	 * check is enabled for the extra space, don't zero it, as it will be
	 * redzoned soon. The redzone operation for this extra space can be
	 * seen as a replacement for the current poisoning under certain debug
	 * options, and won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
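
/*
 * Illustrative only (a hedged sketch, not from the original header): summing
 * SLUB's partial slab counts across all nodes with the iterator above (the
 * nr_partial field exists only under CONFIG_SLUB):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */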


#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
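
/*
 * Illustrative only (a hedged sketch, not from the original header): the
 * allocator paths use these helpers to decide whether an object must be
 * zeroed, roughly:
 *
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);
 *
 * and pass slab_want_init_on_alloc(flags, s) as the "init" argument down to
 * slab_post_alloc_hook() above.
 */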

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */