// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b
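
/*
 * With these definitions, e.g. EM(MR_COMPACTION, "compaction") expands to
 * "compaction", so MIGRATE_REASON below yields one name string per reason.
 */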
const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

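/*
 * These tables back the %pGp, %pGt, %pGg and %pGv vsprintf/printk format
 * specifiers used by the dump routines below and elsewhere in the kernel.
 */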
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

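/*
 * Print everything we can safely deduce about @page: refcount, mapcount,
 * mapping, flags and a raw hex dump. Written defensively, since the page
 * handed to us may be corrupt.
 */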
static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping(). Instead, do
		 * a safe subset of the steps that page_mapping() does.
		 * Caution: this will be misleading for tail pages,
		 * PageSwapCache pages, and potentially other situations.
		 * (See the page_mapping() implementation for what's missing
		 * here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount():
	 * the page->_mapcount space in struct page is used by sl[aou]b pages
	 * to encode their own information.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				head, compound_order(head),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
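	/*
	 * pageflag_names must contain one entry per page flag plus the
	 * {0, NULL} terminator; catch any mismatch at compile time.
	 */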
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	pr_warn("page_type: %pGt\n", &head->page_type);

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);
}

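/*
 * Dump the state of @page to the kernel log, with an optional human-readable
 * @reason. A minimal sketch of a call site (illustrative, not taken from a
 * real caller):
 *
 *	if (page_count(page) < 0)
 *		dump_page(page, "negative refcount");
 */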
void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

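/*
 * Dump the key fields of a VMA. %px is used so the real (unhashed) pointer
 * values appear in the log, which is what you want when debugging.
 */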
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

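/*
 * Dump an mm_struct. The format string and the argument list below are
 * assembled under the same #ifdefs so that they always stay in sync.
 */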
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

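/*
 * Parse the "vm_debug" kernel command-line option:
 *   vm_debug	- enable all debugging options this code controls
 *   vm_debug=-	- disable all of them
 *   vm_debug=p	- enable struct page init-time poisoning
 * Unknown option characters are reported and skipped.
 */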
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * that all the debugging options we can control be enabled.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

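/*
 * Poison @size bytes starting at @page with PAGE_POISON_PATTERN so that any
 * use of a struct page before it is properly initialized stands out in a
 * dump. @size is typically a multiple of sizeof(struct page).
 */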
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
#endif /* CONFIG_DEBUG_VM */