/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB), or the
 * allocator must include definitions for all fields provided in
 * kmem_cache_common in its own definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size */
        unsigned int align;     /* Alignment as calculated */
        slab_flags_t flags;     /* Active flags on the slab */
        unsigned int useroffset;/* Usercopy region offset */
        unsigned int usersize;  /* Usercopy region size */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;
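
/*
 * Example (illustrative): early boot code can check how far bootstrap has
 * progressed before relying on slab services, e.g. "slab_state >= UP" is
 * the condition used to tell whether the kmalloc caches are ready for
 * general use.
 */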

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
                        slab_flags_t flags, unsigned int useroffset,
                        unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name)
{
        return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case the objects listed in the array
 * may be allocated or freed one by one using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
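
/*
 * Note: the generic fallbacks in mm/slab_common.c simply loop over the
 * array, calling the per-object alloc/free paths for each entry and
 * unwinding already-allocated entries if an allocation fails.
 */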

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
        VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (static_branch_unlikely(&slub_debug_enabled))
                return s->flags & flags;
#endif
        return false;
}
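
/*
 * Example (illustrative): callers use this to keep debug-only work off the
 * fast path, e.g. cache_from_obj() below skips looking up the owning cache
 * unless consistency checking (or freelist hardening) is enabled.
 */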

#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
                                 gfp_t gfp, bool new_page);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
        kfree(page_objcgs(page));
        page->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}
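
/*
 * Worked example (illustrative): with s->size == 64 and 8-byte pointers,
 * each accounted object is charged 64 + 8 = 72 bytes.
 */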

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        struct obj_cgroup *objcg;

        if (!memcg_kmem_enabled())
                return true;

        if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
                return true;

        objcg = get_obj_cgroup_from_current();
        if (!objcg)
                return true;

        if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
                obj_cgroup_put(objcg);
                return false;
        }

        *objcgp = objcg;
        return true;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
                                   struct pglist_data *pgdat,
                                   enum node_stat_item idx, int nr)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        lruvec = mem_cgroup_lruvec(memcg, pgdat);
        mod_memcg_lruvec_state(lruvec, idx, nr);
        rcu_read_unlock();
}

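/*
 * Record which obj_cgroup each successfully allocated object belongs to in
 * the slab page's obj_cgroup vector (the charge itself was taken up front in
 * memcg_slab_pre_alloc_hook()); slots that failed to allocate are uncharged
 * again, and the reference obtained by the pre hook is dropped at the end.
 */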
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
        struct page *page;
        unsigned long off;
        size_t i;

        if (!memcg_kmem_enabled() || !objcg)
                return;

        flags &= ~__GFP_ACCOUNT;
        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        page = virt_to_head_page(p[i]);

                        if (!page_objcgs(page) &&
                            memcg_alloc_page_obj_cgroups(page, s, flags,
                                                         false)) {
                                obj_cgroup_uncharge(objcg, obj_full_size(s));
                                continue;
                        }

                        off = obj_to_index(s, page, p[i]);
                        obj_cgroup_get(objcg);
                        page_objcgs(page)[off] = objcg;
                        mod_objcg_state(objcg, page_pgdat(page),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
                        obj_cgroup_uncharge(objcg, obj_full_size(s));
                }
        }
        obj_cgroup_put(objcg);
}

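/*
 * Undo the per-object accounting on free: look up each object's obj_cgroup
 * in its slab page's vector, clear the slot, uncharge the object's footprint
 * and drop the reference taken at allocation time.
 */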
static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
                                        void **p, int objects)
{
        struct kmem_cache *s;
        struct obj_cgroup **objcgs;
        struct obj_cgroup *objcg;
        struct page *page;
        unsigned int off;
        int i;

        if (!memcg_kmem_enabled())
                return;

        for (i = 0; i < objects; i++) {
                if (unlikely(!p[i]))
                        continue;

                page = virt_to_head_page(p[i]);
                objcgs = page_objcgs(page);
                if (!objcgs)
                        continue;

                if (!s_orig)
                        s = page->slab_cache;
                else
                        s = s_orig;

                off = obj_to_index(s, page, p[i]);
                objcg = objcgs[off];
                if (!objcg)
                        continue;

                objcgs[off] = NULL;
                obj_cgroup_uncharge(objcg, obj_full_size(s));
                mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
                                -obj_full_size(s));
                obj_cgroup_put(objcg);
        }
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
                                               struct kmem_cache *s, gfp_t gfp,
                                               bool new_page)
{
        return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
                                        void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page;

        page = virt_to_head_page(obj);
        if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
                      __func__))
                return NULL;
        return page->slab_cache;
}

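/*
 * Account a newly allocated slab page: set up its obj_cgroup vector for
 * SLAB_ACCOUNT caches and add the page to the node's slab vmstat counters.
 * unaccount_slab_page() below reverses both steps when the page is freed.
 */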
static __always_inline void account_slab_page(struct page *page, int order,
                                              struct kmem_cache *s,
                                              gfp_t gfp)
{
        if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
                memcg_alloc_page_obj_cgroups(page, s, gfp, true);

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
                                                struct kmem_cache *s)
{
        if (memcg_kmem_enabled())
                memcg_free_page_obj_cgroups(page);

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;

        if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
            !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
                return s;

        cachep = virt_to_cache(x);
        if (WARN(cachep && cachep != s,
                 "%s: Wrong slab cache. %s but object is from %s\n",
                 __func__, s->name, cachep->name))
                print_tracking(cachep, x);
        return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between the object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we need to store the freelist pointer back there or track
         * user information, then we can only use the space before that
         * information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation.
         */
        return s->size;
#endif
}

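/*
 * Common pre-allocation hook: applies gfp_allowed_mask, lets fault injection
 * fail the allocation, and performs the memcg pre-charge.  Returns the cache
 * to allocate from, or NULL if the allocation should fail.
 */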
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg, gfp_t flags,
                                        size_t size, void **p, bool init)
{
        size_t i;

        flags &= gfp_allowed_mask;

        /*
         * As memory initialization might be integrated into KASAN,
         * kasan_slab_alloc and initialization memset must be
         * kept together to avoid discrepancies in behavior.
         *
         * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
         */
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags, init);
                if (p[i] && init && !kasan_has_integrated_init())
                        memset(p[i], 0, s->object_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))
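
/*
 * Example (illustrative, SLUB-style use):
 *
 *      struct kmem_cache_node *n;
 *      int node;
 *
 *      for_each_kmem_cache_node(s, node, n)
 *              total_partial += n->nr_partial;
 */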

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
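/*
 * Object details filled in by the allocator's kmem_obj_info() for
 * kmem_dump_obj() style reporting: the queried pointer, its slab page and
 * object start, the owning cache, and (when available) the allocation
 * return address and stack trace.
 */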
struct kmem_obj_info {
        void *kp_ptr;
        struct page *kp_page;
        void *kp_objp;
        unsigned long kp_data_offset;
        struct kmem_cache *kp_slab_cache;
        void *kp_ret;
        void *kp_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
#endif

#endif /* MM_SLAB_H */