// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
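
/*
 * Illustrative expansion (a sketch; trace/events/migrate.h holds the
 * authoritative list): each EM() entry contributes its string followed by
 * a comma, and EMe() marks the final entry (no trailing comma), so the
 * initializer above behaves as if written:
 *
 *	const char *migrate_reason_names[MR_TYPES] = {
 *		"compaction",
 *		"memory_failure",
 *		...
 *	};
 */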

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
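
/*
 * These tables are consumed by the %pGp/%pGg/%pGv printk format
 * extensions (see flags_string() in lib/vsprintf.c); that is how the
 * "%pGp" in __dump_folio() below turns folio->flags into symbolic names.
 */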

#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)

static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};

static const char *page_type_name(unsigned int page_type)
{
	unsigned int i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}
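
/*
 * Worked example (illustrative): the page type lives in the top byte of
 * page->page_type, offset from 0xf0 to keep it distinct from real
 * mapcount values.  For page_type == 0xf5000000:
 *
 *	i = (0xf5000000 >> 24) - 0xf0 = 0xf5 - 0xf0 = 5
 *
 * which selects whichever name enum pagetype places at 0xf5; thanks to
 * the designated initializers above, the order of the array entries
 * does not matter.
 */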

/*
 * Note: in the current caller (__dump_page()), @page always points at a
 * caller-held snapshot of the page, while @folio may point either at a
 * snapshot or at the live folio, so values read here can be mutually
 * inconsistent.  That is acceptable for a best-effort debug dump.
 */
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	char *type = "";

	/* _mapcount is biased by -1; a type value there means "unmapped". */
	mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}

static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	/* Snapshot the page so a racing free/split cannot tear what we print. */
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	/*
	 * The page may have been freed or split under us; retry a few
	 * times, then fall back to treating the snapshot as a standalone
	 * page by clearing the tail bit of compound_head.
	 */
	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
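
/*
 * Example usage (illustrative, not part of this file): callers dump a
 * page when a sanity check trips, e.g.
 *
 *	if (unlikely(page_count(page) < 0))
 *		dump_page(page, "negative refcount");
 *
 * VM_BUG_ON_PAGE() in linux/mmdebug.h wraps the same pattern, passing
 * the stringified condition as @reason.
 */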

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
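
/*
 * dump_vma() above and dump_mm() below deliberately use %px, which
 * prints raw (unhashed) pointer values: these routines are built only
 * under CONFIG_DEBUG_VM and the real addresses are the point of the
 * dump, whereas plain %p would hash them.
 */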

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
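
/*
 * Example command-line usage (matching the parser above):
 *
 *	vm_debug	enable every option handled here (currently just
 *			page struct init poisoning)
 *	vm_debug=-	disable all options
 *	vm_debug=P	enable page struct init poisoning only (options
 *			are matched case-insensitively)
 */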

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
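
/*
 * Pages poisoned here are later recognised by the PagePoisoned() check
 * at the top of dump_page(), so a struct page used before it has been
 * initialised shows up as a distinctive pattern rather than as silent
 * garbage.
 */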

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif /* CONFIG_DEBUG_VM */