// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)

static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};

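/*
 * Page types live in the top byte of page->page_type, with values starting
 * at 0xf0 (the PGTY_* constants); strip that offset to index
 * page_type_names[].
 */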
static const char *page_type_name(unsigned int page_type)
{
	unsigned i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}

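/*
 * Print the state of one folio: refcount, mapcounts, mapping, flags, and a
 * raw hex dump of the underlying struct page(s). @page and @idx identify
 * the precise page within the folio that the caller asked about.
 */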
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	char *type = "";

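	/*
	 * _mapcount is biased by -1, so add 1 for the real count; if the
	 * field actually holds a page type marker, report 0 instead.
	 */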
	mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}

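/*
 * The page may be changing under us, so work on local snapshots: copy the
 * struct page (and, for a large folio, the first two struct pages of its
 * head) before decoding anything. If the snapshot looks inconsistent (the
 * page's index lies beyond the folio), retry a few times, then fall back
 * to dumping the page as a standalone one.
 */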
static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

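	/* Snapshot the folio head so __dump_folio() sees a stable copy. */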
	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

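	/*
	 * An index beyond the folio means the snapshot is inconsistent,
	 * most likely because we raced with the folio being split or
	 * freed; take a fresh snapshot and try again.
	 */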
	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

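/*
 * Dump the state of @page to the kernel log, e.g.
 * dump_page(page, "bad page state"); the optional @reason tags the output.
 * A page still carrying the init poison pattern is flagged instead of
 * being decoded.
 */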
void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

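/*
 * These dumps intentionally print raw kernel pointers via %px rather than
 * hashed %p values; they are only built on CONFIG_DEBUG_VM kernels.
 */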
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

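/* Dump the interesting fields of an mm_struct in a single pr_emerg() call. */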
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
		);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

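/*
 * Parse the "vm_debug" kernel command line parameter:
 *   vm_debug	enable every option below (same as passing no '=' at all)
 *   vm_debug=-	disable them all
 *   vm_debug=P	enable struct page init-time poisoning (case-insensitive)
 * Unknown option characters are reported and skipped.
 */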
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

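/*
 * Poison a range of struct pages with PAGE_POISON_PATTERN so that any use
 * of an uninitialized page is caught later (see the PagePoisoned() check
 * in dump_page()).
 */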
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

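/*
 * Dump the VMA iterator state and its maple tree; a no-op unless
 * CONFIG_DEBUG_VM_MAPLE_TREE is enabled.
 */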
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif	/* CONFIG_DEBUG_VM */