/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;

#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			void *freelist;		/* first free object */
			union {
				unsigned long counters;
				struct {
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
#endif
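
/*
 * The alignment assert above exists because, when CONFIG_HAVE_CMPXCHG_DOUBLE
 * is available, SLUB updates ->freelist and ->counters as a single
 * double-word pair, roughly (an illustrative sketch, not the exact call
 * site in mm/slub.c):
 *
 *	cmpxchg_double(&slab->freelist, &slab->counters,
 *		       old_freelist, old_counters,
 *		       new_freelist, new_counters);
 *
 * which requires the pair to start on a double-word boundary.
 */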

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or a single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page.
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
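
/*
 * A minimal usage sketch (hypothetical caller): starting from a folio for
 * which folio_test_slab() is true, convert to struct slab for allocator
 * work, then back to page/folio for generic mm code.
 *
 *	struct slab *slab = folio_slab(folio);
 *	struct page *first = slab_page(slab);	// first page of the folio
 *	struct folio *back = slab_folio(slab);	// back == folio
 */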

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves. The folio's active flag is
 * reused to store this, as slab folios are never placed on the LRU.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
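
/*
 * Illustrative sketch (not a helper defined in this file): querying slab
 * geometry for an arbitrary slab-allocated address.
 *
 *	struct slab *slab = virt_to_slab(ptr);
 *
 *	if (slab) {
 *		void *base = slab_address(slab);	// first byte of the slab
 *		size_t span = slab_size(slab);		// PAGE_SIZE << order
 *		int node = slab_nid(slab);		// backing folio's NUMA node
 *	}
 */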

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
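
/*
 * Rough bootstrap sequence (see kmem_cache_init() in the allocators): the
 * state advances DOWN -> PARTIAL(_NODE) -> UP during kmem_cache_init(), and
 * reaches FULL only once late extras such as sysfs support are set up.
 */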

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

/* Statistics reported through /proc/slabinfo */
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
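
/*
 * Typical use (an illustrative sketch): gate a debug-only path on one
 * specific debug flag, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		run_consistency_checks(s);	// hypothetical helper
 *
 * The static key keeps the common case (debugging disabled) down to a
 * patched-out branch.
 */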

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is extra space used to store its
	 * obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
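
/*
 * Worked example (assuming a 64-bit build): for a cache with s->size == 64,
 * each accounted object is charged 64 + sizeof(struct obj_cgroup *) = 72
 * bytes, so a bulk allocation of 8 objects charges 576 bytes to the objcg.
 */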

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
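
/*
 * The three hooks above pair up around every accounted allocation. A
 * simplified, illustrative control flow (the real call sites live in the
 * slab_alloc/slab_free paths):
 *
 *	if (!memcg_slab_pre_alloc_hook(s, lru, &objcg, size, flags))
 *		return NULL;				// charge refused
 *	// ... allocate objects into p[] ...
 *	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 *	// ... later, on free:
 *	memcg_slab_free_hook(s, slab, p, objects);	// uncharge
 */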

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track
	 * user information, then we can only use the space before that
	 * information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc. for the allocation.
	 */
	return s->size;
#endif
}
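
/*
 * Example outcomes (hypothetical SLUB caches): with red zoning or poisoning
 * enabled, slab_ksize() reports only s->object_size, since the padding
 * belongs to the debug layout; for a plain production cache it reports
 * s->size, so ksize() callers may legitimately use the alignment padding.
 */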

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For a kmalloc object, the allocated memory size (object_size) is
	 * likely larger than the requested size (orig_size). If redzone
	 * checking is enabled for the extra space, don't zero it, as it will
	 * be redzoned soon. The redzone operation for this extra space could
	 * be seen as a replacement of current poisoning under certain debug
	 * options, and won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
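
/*
 * Usage sketch (mirrors real call sites in the allocators): walk every
 * node's partial-list bookkeeping for a cache.
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n) {
 *		unsigned long nr = n->nr_partial;	// SLUB-only field
 *		// ...
 *	}
 */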

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
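
/*
 * Decision examples for the two helpers above (assuming init_on_alloc=1 on
 * the kernel command line):
 *  - a cache with a constructor is never pre-zeroed here, the ctor owns
 *    object initialization;
 *  - a SLAB_TYPESAFE_BY_RCU or SLAB_POISON cache is zeroed only when the
 *    caller passed __GFP_ZERO;
 *  - any other cache is always zeroed on allocation.
 */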

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16

/* Filled in by __kmem_obj_info() for kmem_dump_obj() reporting */
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */