// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}
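
/*
 * Worked example (illustrative values, not guaranteed for any arch): with
 * 128 MiB sections and a 128 MiB block size, sections_per_block == 1 and
 * block ids equal section numbers. With a hypothetical 2 GiB block size,
 * sections_per_block == 16; PFN 0x880000 (34 GiB with 4 KiB pages) then
 * lands in section 272 and memory block 17.
 */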

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local xarray to avoid a costly linear
 * search for the corresponding device on the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

/*
 * Memory groups, indexed by memory group id (mgid).
 */
static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
#define MEMORY_GROUP_MARK_DYNAMIC	XA_MARK_1
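/*
 * The mark lets walkers visit only the dynamic (device) memory groups,
 * e.g. via xa_for_each_marked(&memory_groups, ..., MEMORY_GROUP_MARK_DYNAMIC),
 * instead of filtering every group by hand. (Sketch: the walker itself
 * lives outside this section.)
 */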

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
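
/*
 * Hypothetical subscriber, for illustration only (not part of this file):
 * a driver reacts to hotplug events by registering a callback on the
 * chain; memory_notify() below hands it a struct memory_notify that
 * describes the affected range.
 *
 *	static int foo_memory_callback(struct notifier_block *nb,
 *				       unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		if (action == MEM_ONLINE)
 *			pr_info("onlined %lu pages at PFN %#lx\n",
 *				mn->nr_pages, mn->start_pfn);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_memory_nb = {
 *		.notifier_call = foo_memory_callback,
 *	};
 *
 *	register_memory_notifier(&foo_memory_nb);
 */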

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;

	return sysfs_emit(buf, "%08lx\n", phys_index);
}

/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	struct zone *zone;
	int ret;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
				  start_pfn, nr_pages);

	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at memory onlining/offlining
	 * stage helps to keep accounting easier to follow - e.g. vmemmap
	 * pages belong to the same zone as the memory they describe.
	 */
	if (nr_vmemmap_pages) {
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
		if (ret)
			return ret;
	}

	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone, mem->group);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		return ret;
	}

	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  nr_vmemmap_pages);

	mem->zone = zone;
	return ret;
}
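
/*
 * Layout assumed above when the memmap is allocated from the hotplugged
 * range itself ("memmap_on_memory"); purely illustrative:
 *
 *	start_pfn                                      start_pfn + nr_pages
 *	|<- nr_vmemmap_pages ->|<- pages passed to online/offline_pages() ->|
 *	  self-hosted memmap,
 *	  accounted via adjust_present_page_count()
 *
 * memory_block_offline() below undoes the same steps in reverse order.
 */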

static int memory_block_offline(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	int ret;

	if (!mem->zone)
		return -EINVAL;

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  -nr_vmemmap_pages);

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
			adjust_present_page_count(pfn_to_page(start_pfn),
						  mem->group, nr_vmemmap_pages);
		return ret;
	}

	if (nr_vmemmap_pages)
		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

	mem->zone = NULL;
	return ret;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;

	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: %ld\n",
		     __func__, mem->start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem, to_state);
	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
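
/*
 * Userspace view (illustrative shell session; device numbers vary by
 * system):
 *
 *	# cat /sys/devices/system/memory/memory32/state
 *	online
 *	# echo offline > /sys/devices/system/memory/memory32/state
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * Writes funnel through state_store() above into device_online() or
 * device_offline().
 */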

/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * architectures have never exposed a value != 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      struct memory_group *group,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct memory_group *group = mem->group;
	struct zone *default_zone;
	int nid = mem->nid;
	int len = 0;

	/*
	 * Check the existing zone. Make sure that we do that only on
	 * online nodes, otherwise page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * If !mem->zone, the memory block spans multiple zones and
		 * cannot get offlined.
		 */
		default_zone = mem->zone;
		if (!default_zone)
			return sysfs_emit(buf, "%s\n", "none");
		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
		goto out;
	}

	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
					  start_pfn, nr_pages);

	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
out:
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif
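
/*
 * Example output (illustrative): an offline block prints the default zone
 * first, followed by any other admissible zones, e.g.
 *
 *	# cat /sys/devices/system/memory/memory32/valid_zones
 *	Normal Movable
 *
 * An online block prints only its current zone, or "none" if it spans
 * multiple zones and therefore cannot get offlined.
 */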

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);
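
/*
 * Note the bare "%lx": userspace sees an unprefixed hex value, e.g.
 * "8000000" for a 128 MiB block size (the value is arch-dependent; this
 * is an example only).
 */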

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);

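/*
 * Illustrative use: "echo online_movable >
 * /sys/devices/system/memory/auto_online_blocks" makes newly hot-added
 * blocks default to ZONE_MOVABLE. Any string accepted by
 * mhp_online_type_from_str() is valid here, including "offline".
 */
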
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
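
/*
 * Illustrative probe (the address must be memory-block aligned, per the
 * mask check above; the address itself is an example):
 *
 *	# echo 0x100000000 > /sys/devices/system/memory/probe
 *
 * hot-adds one block's worth of memory starting at 4 GiB on
 * architectures that enable CONFIG_ARCH_MEMORY_PROBE.
 */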

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
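
/*
 * Despite the "pfn" variable name, both stores above parse a physical
 * address and derive the PFN via "pfn >>= PAGE_SHIFT". Illustrative use
 * (the address is an example):
 *
 *	# echo 0x200000 > /sys/devices/system/memory/soft_offline_page
 *
 * soft-offlines the page backing physical address 2 MiB.
 */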

/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(unsigned long section_nr)
{
	unsigned long block_id = memory_block_id(section_nr);

	return find_memory_block_by_id(block_id);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static const struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

static int __add_memory_block(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret)
		device_unregister(&memory->dev);

	return ret;
}

static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
						     int nid)
{
	const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *zone, *matching_zone = NULL;
	pg_data_t *pgdat = NODE_DATA(nid);
	int i;

	/*
	 * This logic only works for early memory, when the applicable zones
	 * already span the memory block. We don't expect overlapping zones on
	 * a single node for early memory. So if we're told that some PFNs
	 * of a node fall into this memory block, we can assume that all node
	 * zones that intersect with the memory block are actually applicable.
	 * No need to look at the memmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		if (!populated_zone(zone))
			continue;
		if (!zone_intersects(zone, start_pfn, nr_pages))
			continue;
		if (!matching_zone) {
			matching_zone = zone;
			continue;
		}
		/* Spans multiple zones ... */
		matching_zone = NULL;
		break;
	}
	return matching_zone;
}

#ifdef CONFIG_NUMA
/**
 * memory_block_add_nid() - Indicate that system RAM falling into this memory
 *			    block device (partially) belongs to the given node.
 * @mem: The memory block device.
 * @nid: The node id.
 * @context: The memory initialization context.
 *
 * Indicate that system RAM falling into this memory block (partially) belongs
 * to the given node. If the context indicates ("early") that we are adding the
 * node during node device subsystem initialization, this will also properly
 * set/adjust mem->zone based on the zone ranges of the given node.
 */
void memory_block_add_nid(struct memory_block *mem, int nid,
			  enum meminit_context context)
{
	if (context == MEMINIT_EARLY && mem->nid != nid) {
		/*
		 * For early memory we have to determine the zone when setting
		 * the node id, and handle multiple nodes spanning a single
		 * memory block by indicating via zone == NULL that we're not
		 * dealing with a single zone. So if we're setting the node id
		 * the first time, determine if there is a single zone. If we're
		 * setting the node id a second time to a different node,
		 * invalidate the single detected zone.
		 */
		if (mem->nid == NUMA_NO_NODE)
			mem->zone = early_node_zone_for_memory_block(mem, nid);
		else
			mem->zone = NULL;
	}

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node. If we span multiple nodes (not applicable
	 * to hotplugged memory), zone == NULL will prohibit memory offlining
	 * and consequently unplugging.
	 */
	mem->nid = nid;
}
#endif

static int add_memory_block(unsigned long block_id, unsigned long state,
			    unsigned long nr_vmemmap_pages,
			    struct memory_group *group)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;
	mem->nr_vmemmap_pages = nr_vmemmap_pages;
	INIT_LIST_HEAD(&mem->group_next);

#ifndef CONFIG_NUMA
	if (state == MEM_ONLINE)
		/*
		 * MEM_ONLINE at this point implies early memory. With NUMA,
		 * we'll determine the zone when setting the node id via
		 * memory_block_add_nid(). Memory hotplug updates the zone
		 * manually when memory onlining/offlining succeeds.
		 */
		mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
#endif /* CONFIG_NUMA */

	ret = __add_memory_block(mem);
	if (ret)
		return ret;

	if (group) {
		mem->group = group;
		list_add(&mem->group_next, &group->memory_blocks);
	}

	return 0;
}
| 765 | |
David Hildenbrand | 2aa065f | 2022-03-22 14:47:34 -0700 | [diff] [blame] | 766 | static int __init add_boot_memory_block(unsigned long base_section_nr) |
Nathan Fontenot | 0c2c99b | 2011-01-20 10:43:34 -0600 | [diff] [blame] | 767 | { |
David Hildenbrand | 68c3a6a | 2020-04-06 20:06:40 -0700 | [diff] [blame] | 768 | int section_count = 0; |
David Hildenbrand | 2491f0a | 2019-07-18 15:57:37 -0700 | [diff] [blame] | 769 | unsigned long nr; |
Nathan Fontenot | 0c2c99b | 2011-01-20 10:43:34 -0600 | [diff] [blame] | 770 | |
David Hildenbrand | 2491f0a | 2019-07-18 15:57:37 -0700 | [diff] [blame] | 771 | for (nr = base_section_nr; nr < base_section_nr + sections_per_block; |
| 772 | nr++) |
| 773 | if (present_section_nr(nr)) |
David Hildenbrand | 1811582 | 2019-07-18 15:56:46 -0700 | [diff] [blame] | 774 | section_count++; |
Nathan Fontenot | 0c2c99b | 2011-01-20 10:43:34 -0600 | [diff] [blame] | 775 | |
Seth Jennings | cb5e39b | 2013-08-20 12:13:03 -0500 | [diff] [blame] | 776 | if (section_count == 0) |
| 777 | return 0; |
David Hildenbrand | 2aa065f | 2022-03-22 14:47:34 -0700 | [diff] [blame] | 778 | return add_memory_block(memory_block_id(base_section_nr), |
| 779 | MEM_ONLINE, 0, NULL); |
Nathan Fontenot | e4619c8 | 2010-10-19 12:44:20 -0500 | [diff] [blame] | 780 | } |
| 781 | |
David Hildenbrand | 2aa065f | 2022-03-22 14:47:34 -0700 | [diff] [blame] | 782 | static int add_hotplug_memory_block(unsigned long block_id, |
| 783 | unsigned long nr_vmemmap_pages, |
| 784 | struct memory_group *group) |
| 785 | { |
| 786 | return add_memory_block(block_id, MEM_OFFLINE, nr_vmemmap_pages, group); |
| 787 | } |
| 788 | |
| 789 | static void remove_memory_block(struct memory_block *memory) |
David Rientjes | 4edd7ce | 2013-04-29 15:08:22 -0700 | [diff] [blame] | 790 | { |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 791 | if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys)) |
| 792 | return; |
David Rientjes | 4edd7ce | 2013-04-29 15:08:22 -0700 | [diff] [blame] | 793 | |
Scott Cheloha | 4fb6eab | 2020-06-03 16:03:48 -0700 | [diff] [blame] | 794 | WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL); |
| 795 | |
David Hildenbrand | 028fc57 | 2021-09-07 19:55:26 -0700 | [diff] [blame] | 796 | if (memory->group) { |
| 797 | list_del(&memory->group_next); |
| 798 | memory->group = NULL; |
| 799 | } |
| 800 | |
David Hildenbrand | cb7b3a3 | 2019-05-13 17:21:37 -0700 | [diff] [blame] | 801 | /* drop the ref. we got via find_memory_block() */ |
Seth Jennings | df2b717 | 2013-08-20 12:12:59 -0500 | [diff] [blame] | 802 | put_device(&memory->dev); |
David Rientjes | 4edd7ce | 2013-04-29 15:08:22 -0700 | [diff] [blame] | 803 | device_unregister(&memory->dev); |
| 804 | } |
| 805 | |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 806 | /* |
| 807 | * Create memory block devices for the given memory area. Start and size |
| 808 | * have to be aligned to memory block granularity. Memory block devices |
| 809 | * will be initialized as offline. |
David Hildenbrand | 848e19a | 2019-11-30 17:54:14 -0800 | [diff] [blame] | 810 | * |
| 811 | * Called under device_hotplug_lock. |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 812 | */ |
Oscar Salvador | a08a2ae | 2021-05-04 18:39:42 -0700 | [diff] [blame] | 813 | int create_memory_block_devices(unsigned long start, unsigned long size, |
David Hildenbrand | 028fc57 | 2021-09-07 19:55:26 -0700 | [diff] [blame] | 814 | unsigned long vmemmap_pages, |
| 815 | struct memory_group *group) |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 816 | { |
David Hildenbrand | 90ec010f | 2019-07-18 15:57:40 -0700 | [diff] [blame] | 817 | const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); |
| 818 | unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 819 | struct memory_block *mem; |
| 820 | unsigned long block_id; |
| 821 | int ret = 0; |
| 822 | |
| 823 | if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) || |
| 824 | !IS_ALIGNED(size, memory_block_size_bytes()))) |
| 825 | return -EINVAL; |
| 826 | |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 827 | for (block_id = start_block_id; block_id != end_block_id; block_id++) { |
David Hildenbrand | 2aa065f | 2022-03-22 14:47:34 -0700 | [diff] [blame] | 828 | ret = add_hotplug_memory_block(block_id, vmemmap_pages, group); |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 829 | if (ret) |
| 830 | break; |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 831 | } |
| 832 | if (ret) { |
| 833 | end_block_id = block_id; |
| 834 | for (block_id = start_block_id; block_id != end_block_id; |
| 835 | block_id++) { |
David Hildenbrand | dd62528 | 2019-07-18 15:57:53 -0700 | [diff] [blame] | 836 | mem = find_memory_block_by_id(block_id); |
David Hildenbrand | 848e19a | 2019-11-30 17:54:14 -0800 | [diff] [blame] | 837 | if (WARN_ON_ONCE(!mem)) |
| 838 | continue; |
David Hildenbrand | 2aa065f | 2022-03-22 14:47:34 -0700 | [diff] [blame] | 839 | remove_memory_block(mem); |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 840 | } |
| 841 | } |
David Hildenbrand | db051a0 | 2019-07-18 15:56:56 -0700 | [diff] [blame] | 842 | return ret; |
| 843 | } |
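
/*
 * Illustrative sketch, not part of this file: how a hot-add path pairs
 * create_memory_block_devices() with its undo, loosely modeled on the
 * add_memory_resource() flow in mm/memory_hotplug.c. "example_hot_add" is
 * a hypothetical name, and the arch_add_memory()/arch_remove_memory()
 * signatures are assumptions from the same kernel era as this code.
 */
static int example_hot_add(int nid, u64 start, u64 size,
			   struct mhp_params *params)
{
	int ret;

	ret = arch_add_memory(nid, start, size, params);
	if (ret)
		return ret;

	/* One offline memory block device per block in the range. */
	ret = create_memory_block_devices(start, size, 0, NULL);
	if (ret)
		arch_remove_memory(start, size, params->altmap);
	return ret;
}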
| 844 | |
David Hildenbrand | 4c4b7f9 | 2019-07-18 15:57:06 -0700 | [diff] [blame] | 845 | /* |
| 846 | * Remove memory block devices for the given memory area. Start and size |
| 847 | * have to be aligned to memory block granularity. Memory block devices |
| 848 | * have to be offline. |
David Hildenbrand | 848e19a | 2019-11-30 17:54:14 -0800 | [diff] [blame] | 849 | * |
| 850 | * Called under device_hotplug_lock. |
David Hildenbrand | 4c4b7f9 | 2019-07-18 15:57:06 -0700 | [diff] [blame] | 851 | */ |
| 852 | void remove_memory_block_devices(unsigned long start, unsigned long size) |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 853 | { |
David Hildenbrand | 90ec010f | 2019-07-18 15:57:40 -0700 | [diff] [blame] | 854 | const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); |
| 855 | const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 856 | struct memory_block *mem; |
David Hildenbrand | 90ec010f | 2019-07-18 15:57:40 -0700 | [diff] [blame] | 857 | unsigned long block_id; |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 858 | |
David Hildenbrand | 4c4b7f9 | 2019-07-18 15:57:06 -0700 | [diff] [blame] | 859 | if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) || |
| 860 | !IS_ALIGNED(size, memory_block_size_bytes()))) |
David Hildenbrand | cb7b3a3 | 2019-05-13 17:21:37 -0700 | [diff] [blame] | 861 | return; |
| 862 | |
David Hildenbrand | 4c4b7f9 | 2019-07-18 15:57:06 -0700 | [diff] [blame] | 863 | for (block_id = start_block_id; block_id != end_block_id; block_id++) { |
David Hildenbrand | dd62528 | 2019-07-18 15:57:53 -0700 | [diff] [blame] | 864 | mem = find_memory_block_by_id(block_id); |
David Hildenbrand | 4c4b7f9 | 2019-07-18 15:57:06 -0700 | [diff] [blame] | 865 | if (WARN_ON_ONCE(!mem)) |
| 866 | continue; |
David Hildenbrand | 4c4b7f9 | 2019-07-18 15:57:06 -0700 | [diff] [blame] | 867 | unregister_memory_block_under_nodes(mem); |
David Hildenbrand | 2aa065f | 2022-03-22 14:47:34 -0700 | [diff] [blame] | 868 | remove_memory_block(mem); |
David Hildenbrand | 4c4b7f9 | 2019-07-18 15:57:06 -0700 | [diff] [blame] | 869 | } |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 870 | } |
| 871 | |
Yasuaki Ishimatsu | 6677e3e | 2013-02-22 16:32:52 -0800 | [diff] [blame] | 872 | /* return true if the memory block is offlined, otherwise return false */ |
| 873 | bool is_memblock_offlined(struct memory_block *mem) |
| 874 | { |
| 875 | return mem->state == MEM_OFFLINE; |
| 876 | } |
| 877 | |
Nathan Fontenot | 96b2c0f | 2013-06-04 14:42:28 -0500 | [diff] [blame] | 878 | static struct attribute *memory_root_attrs[] = { |
| 879 | #ifdef CONFIG_ARCH_MEMORY_PROBE |
| 880 | &dev_attr_probe.attr, |
| 881 | #endif |
| 882 | |
| 883 | #ifdef CONFIG_MEMORY_FAILURE |
| 884 | &dev_attr_soft_offline_page.attr, |
| 885 | &dev_attr_hard_offline_page.attr, |
| 886 | #endif |
| 887 | |
| 888 | &dev_attr_block_size_bytes.attr, |
Vitaly Kuznetsov | 31bc385 | 2016-03-15 14:56:48 -0700 | [diff] [blame] | 889 | &dev_attr_auto_online_blocks.attr, |
Nathan Fontenot | 96b2c0f | 2013-06-04 14:42:28 -0500 | [diff] [blame] | 890 | NULL |
| 891 | }; |
| 892 | |
Rikard Falkeborn | 5a57676 | 2021-05-28 23:34:08 +0200 | [diff] [blame] | 893 | static const struct attribute_group memory_root_attr_group = { |
Nathan Fontenot | 96b2c0f | 2013-06-04 14:42:28 -0500 | [diff] [blame] | 894 | .attrs = memory_root_attrs, |
| 895 | }; |
| 896 | |
| 897 | static const struct attribute_group *memory_root_attr_groups[] = { |
| 898 | &memory_root_attr_group, |
| 899 | NULL, |
| 900 | }; |
| 901 | |
Wen Congyang | e90bdb7 | 2012-10-08 16:34:01 -0700 | [diff] [blame] | 902 | /* |
David Hildenbrand | 848e19a | 2019-11-30 17:54:14 -0800 | [diff] [blame] | 903 | * Initialize the sysfs support for memory devices. At the time this function |
| 904 | * is called, we cannot have concurrent creation/deletion of memory block |
| 905 |  * devices, so the device_hotplug_lock is not needed. |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 906 | */ |
David Hildenbrand | 902ce63b | 2019-09-23 15:35:46 -0700 | [diff] [blame] | 907 | void __init memory_dev_init(void) |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 908 | { |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 909 | int ret; |
David Hildenbrand | 2491f0a | 2019-07-18 15:57:37 -0700 | [diff] [blame] | 910 | unsigned long block_sz, nr; |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 911 | |
David Hildenbrand | 902ce63b | 2019-09-23 15:35:46 -0700 | [diff] [blame] | 912 | /* Validate the configured memory block size */ |
| 913 | block_sz = memory_block_size_bytes(); |
| 914 | if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE) |
| 915 | panic("Memory block size not suitable: 0x%lx\n", block_sz); |
| 916 | sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; |
| 917 | |
Nathan Fontenot | 96b2c0f | 2013-06-04 14:42:28 -0500 | [diff] [blame] | 918 | ret = subsys_system_register(&memory_subsys, memory_root_attr_groups); |
Andrew Morton | 28ec24e | 2006-12-06 20:37:29 -0800 | [diff] [blame] | 919 | if (ret) |
David Hildenbrand | 848e19a | 2019-11-30 17:54:14 -0800 | [diff] [blame] | 920 | panic("%s() failed to register subsystem: %d\n", __func__, ret); |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 921 | |
| 922 | /* |
| 923 | * Create entries for the memory blocks that contain sections |
| 924 | * found and initialized during boot. |
| 925 | */ |
David Hildenbrand | 2491f0a | 2019-07-18 15:57:37 -0700 | [diff] [blame] | 926 | for (nr = 0; nr <= __highest_present_section_nr; |
| 927 | nr += sections_per_block) { |
David Hildenbrand | 2aa065f | 2022-03-22 14:47:34 -0700 | [diff] [blame] | 928 | ret = add_boot_memory_block(nr); |
David Hildenbrand | 848e19a | 2019-11-30 17:54:14 -0800 | [diff] [blame] | 929 | if (ret) |
| 930 | panic("%s() failed to add memory block: %d\n", __func__, |
| 931 | ret); |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 932 | } |
Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 933 | } |
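
/*
 * Worked example (illustrative): with a 128 MiB section
 * (MIN_MEMORY_BLOCK_SIZE on x86-64) and the 2 GiB memory block size used
 * on large x86-64 machines, the division above yields
 *
 *	sections_per_block = 0x80000000 / 0x8000000 = 16,
 *
 * so every memory block device in /sys/devices/system/memory spans 16
 * sections and memory_block_id() is simply section_nr / 16.
 */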
David Hildenbrand | ea88464 | 2019-07-18 15:57:50 -0700 | [diff] [blame] | 934 | |
| 935 | /** |
| 936 | * walk_memory_blocks - walk through all present memory blocks overlapped |
| 937 | * by the range [start, start + size) |
| 938 | * |
| 939 | * @start: start address of the memory range |
| 940 | * @size: size of the memory range |
| 941 | * @arg: argument passed to func |
| 942 |  * @func: callback for each memory block walked |
| 943 | * |
| 944 | * This function walks through all present memory blocks overlapped by the |
| 945 | * range [start, start + size), calling func on each memory block. |
| 946 | * |
| 947 | * In case func() returns an error, walking is aborted and the error is |
| 948 | * returned. |
Scott Cheloha | 4fb6eab | 2020-06-03 16:03:48 -0700 | [diff] [blame] | 949 | * |
| 950 | * Called under device_hotplug_lock. |
David Hildenbrand | ea88464 | 2019-07-18 15:57:50 -0700 | [diff] [blame] | 951 | */ |
| 952 | int walk_memory_blocks(unsigned long start, unsigned long size, |
| 953 | void *arg, walk_memory_blocks_func_t func) |
| 954 | { |
| 955 | const unsigned long start_block_id = phys_to_block_id(start); |
| 956 | const unsigned long end_block_id = phys_to_block_id(start + size - 1); |
| 957 | struct memory_block *mem; |
| 958 | unsigned long block_id; |
| 959 | int ret = 0; |
| 960 | |
David Hildenbrand | dd62528 | 2019-07-18 15:57:53 -0700 | [diff] [blame] | 961 | if (!size) |
| 962 | return 0; |
| 963 | |
David Hildenbrand | ea88464 | 2019-07-18 15:57:50 -0700 | [diff] [blame] | 964 | for (block_id = start_block_id; block_id <= end_block_id; block_id++) { |
David Hildenbrand | dd62528 | 2019-07-18 15:57:53 -0700 | [diff] [blame] | 965 | mem = find_memory_block_by_id(block_id); |
David Hildenbrand | ea88464 | 2019-07-18 15:57:50 -0700 | [diff] [blame] | 966 | if (!mem) |
| 967 | continue; |
| 968 | |
| 969 | ret = func(mem, arg); |
| 970 | put_device(&mem->dev); |
| 971 | if (ret) |
| 972 | break; |
| 973 | } |
| 974 | return ret; |
| 975 | } |
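
/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 * count how many memory blocks overlapped by a range are online. The
 * callback matches walk_memory_blocks_func_t and runs with the
 * device_hotplug_lock held, per the locking rule documented above.
 */
static int count_online_blocks_cb(struct memory_block *mem, void *arg)
{
	unsigned long *online = arg;

	if (mem->state == MEM_ONLINE)
		(*online)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_online_blocks(unsigned long start,
					 unsigned long size)
{
	unsigned long online = 0;

	walk_memory_blocks(start, size, &online, count_online_blocks_cb);
	return online;
}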
David Hildenbrand | 2c91f8f | 2019-11-15 17:34:57 -0800 | [diff] [blame] | 976 | |
| 977 | struct for_each_memory_block_cb_data { |
| 978 | walk_memory_blocks_func_t func; |
| 979 | void *arg; |
| 980 | }; |
| 981 | |
| 982 | static int for_each_memory_block_cb(struct device *dev, void *data) |
| 983 | { |
| 984 | struct memory_block *mem = to_memory_block(dev); |
| 985 | struct for_each_memory_block_cb_data *cb_data = data; |
| 986 | |
| 987 | return cb_data->func(mem, cb_data->arg); |
| 988 | } |
| 989 | |
| 990 | /** |
| 991 | * for_each_memory_block - walk through all present memory blocks |
| 992 | * |
| 993 | * @arg: argument passed to func |
| 994 | * @func: callback for each memory block walked |
| 995 | * |
| 996 | * This function walks through all present memory blocks, calling func on |
| 997 | * each memory block. |
| 998 | * |
| 999 | * In case func() returns an error, walking is aborted and the error is |
| 1000 | * returned. |
| 1001 | */ |
| 1002 | int for_each_memory_block(void *arg, walk_memory_blocks_func_t func) |
| 1003 | { |
| 1004 | struct for_each_memory_block_cb_data cb_data = { |
| 1005 | .func = func, |
| 1006 | .arg = arg, |
| 1007 | }; |
| 1008 | |
| 1009 | return bus_for_each_dev(&memory_subsys, NULL, &cb_data, |
| 1010 | for_each_memory_block_cb); |
| 1011 | } |
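
/*
 * Illustrative sketch (hypothetical name): the same callback shape works
 * here, but for_each_memory_block() iterates the memory bus via the
 * driver core, visiting every block in the system rather than a physical
 * range. Usage: unsigned long nr = 0; for_each_memory_block(&nr, ...).
 */
static int count_all_blocks_cb(struct memory_block *mem, void *arg)
{
	(*(unsigned long *)arg)++;
	return 0;	/* keep iterating over all blocks */
}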
David Hildenbrand | 028fc57 | 2021-09-07 19:55:26 -0700 | [diff] [blame] | 1012 | |
| 1013 | /* |
| 1014 | * This is an internal helper to unify allocation and initialization of |
| 1015 | * memory groups. Note that the passed memory group will be copied to a |
| 1016 | * dynamically allocated memory group. After this call, the passed |
| 1017 | * memory group should no longer be used. |
| 1018 | */ |
| 1019 | static int memory_group_register(struct memory_group group) |
| 1020 | { |
| 1021 | struct memory_group *new_group; |
| 1022 | uint32_t mgid; |
| 1023 | int ret; |
| 1024 | |
| 1025 | if (!node_possible(group.nid)) |
| 1026 | return -EINVAL; |
| 1027 | |
| 1028 | new_group = kzalloc(sizeof(group), GFP_KERNEL); |
| 1029 | if (!new_group) |
| 1030 | return -ENOMEM; |
| 1031 | *new_group = group; |
| 1032 | INIT_LIST_HEAD(&new_group->memory_blocks); |
| 1033 | |
| 1034 | ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b, |
| 1035 | GFP_KERNEL); |
| 1036 | if (ret) { |
| 1037 | kfree(new_group); |
| 1038 | return ret; |
David Hildenbrand | 3fcebf9 | 2021-09-07 19:55:48 -0700 | [diff] [blame] | 1039 | } else if (group.is_dynamic) { |
| 1040 | xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC); |
David Hildenbrand | 028fc57 | 2021-09-07 19:55:26 -0700 | [diff] [blame] | 1041 | } |
| 1042 | return mgid; |
| 1043 | } |
| 1044 | |
| 1045 | /** |
| 1046 | * memory_group_register_static() - Register a static memory group. |
| 1047 | * @nid: The node id. |
| 1048 | * @max_pages: The maximum number of pages we'll have in this static memory |
| 1049 | * group. |
| 1050 | * |
| 1051 | * Register a new static memory group and return the memory group id. |
| 1052 | * All memory in the group belongs to a single unit, such as a DIMM. All |
| 1053 |  * memory belonging to a static memory group is added in one go and removed |
| 1054 | * in one go -- it's static. |
| 1055 | * |
| 1056 | * Returns an error if out of memory, if the node id is invalid, if no new |
| 1057 | * memory groups can be registered, or if max_pages is invalid (0). Otherwise, |
| 1058 | * returns the new memory group id. |
| 1059 | */ |
| 1060 | int memory_group_register_static(int nid, unsigned long max_pages) |
| 1061 | { |
| 1062 | struct memory_group group = { |
| 1063 | .nid = nid, |
| 1064 | .s = { |
| 1065 | .max_pages = max_pages, |
| 1066 | }, |
| 1067 | }; |
| 1068 | |
| 1069 | if (!max_pages) |
| 1070 | return -EINVAL; |
| 1071 | return memory_group_register(group); |
| 1072 | } |
| 1073 | EXPORT_SYMBOL_GPL(memory_group_register_static); |
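
/*
 * Illustrative sketch (hypothetical function and resource names): a driver
 * exposing one DIMM-like device registers a static group and hands the
 * returned id to the hot-add path, the pattern used by dax/kmem. The
 * MHP_NID_IS_MGID flag and the add_memory_driver_managed() signature are
 * assumptions from the same kernel era as this code.
 */
static int example_add_dimm(int nid, u64 start, u64 size)
{
	int mgid, rc;

	/* Every page of the DIMM will belong to this one static group. */
	mgid = memory_group_register_static(nid, PHYS_PFN(size));
	if (mgid < 0)
		return mgid;

	/* MHP_NID_IS_MGID: the first argument carries a group id, not a node id. */
	rc = add_memory_driver_managed(mgid, start, size,
				       "System RAM (example)",
				       MHP_NID_IS_MGID);
	if (rc)
		memory_group_unregister(mgid);
	return rc;
}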
| 1074 | |
| 1075 | /** |
| 1076 | * memory_group_register_dynamic() - Register a dynamic memory group. |
| 1077 | * @nid: The node id. |
| 1078 |  * @unit_pages: Unit in pages in which memory is added/removed in this |
| 1079 |  * dynamic memory group. |
| 1080 | * |
| 1081 | * Register a new dynamic memory group and return the memory group id. |
| 1082 | * Memory within a dynamic memory group is added/removed dynamically |
| 1083 |  * in multiples of unit_pages. |
| 1084 | * |
| 1085 | * Returns an error if out of memory, if the node id is invalid, if no new |
| 1086 | * memory groups can be registered, or if unit_pages is invalid (0, not a |
| 1087 | * power of two, smaller than a single memory block). Otherwise, returns the |
| 1088 | * new memory group id. |
| 1089 | */ |
| 1090 | int memory_group_register_dynamic(int nid, unsigned long unit_pages) |
| 1091 | { |
| 1092 | struct memory_group group = { |
| 1093 | .nid = nid, |
| 1094 | .is_dynamic = true, |
| 1095 | .d = { |
| 1096 | .unit_pages = unit_pages, |
| 1097 | }, |
| 1098 | }; |
| 1099 | |
| 1100 | if (!unit_pages || !is_power_of_2(unit_pages) || |
| 1101 | unit_pages < PHYS_PFN(memory_block_size_bytes())) |
| 1102 | return -EINVAL; |
| 1103 | return memory_group_register(group); |
| 1104 | } |
| 1105 | EXPORT_SYMBOL_GPL(memory_group_register_dynamic); |
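
/*
 * Illustrative sketch (hypothetical name): a virtio-mem-style driver that
 * plugs memory in fixed-size units derives unit_pages from its device
 * granularity; the checks above reject anything that is not a power of
 * two or is smaller than one memory block.
 */
static int example_register_dynamic(int nid, u64 device_block_size)
{
	/* Both operands are powers of two, so the maximum is one, too. */
	unsigned long unit_pages = max_t(unsigned long,
					 PHYS_PFN(device_block_size),
					 PHYS_PFN(memory_block_size_bytes()));

	return memory_group_register_dynamic(nid, unit_pages);
}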
| 1106 | |
| 1107 | /** |
| 1108 | * memory_group_unregister() - Unregister a memory group. |
| 1109 | * @mgid: the memory group id |
| 1110 | * |
| 1111 | * Unregister a memory group. If any memory block still belongs to this |
| 1112 | * memory group, unregistering will fail. |
| 1113 | * |
| 1114 | * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some |
| 1115 | * memory blocks still belong to this memory group and returns 0 if |
| 1116 | * unregistering succeeded. |
| 1117 | */ |
| 1118 | int memory_group_unregister(int mgid) |
| 1119 | { |
| 1120 | struct memory_group *group; |
| 1121 | |
| 1122 | if (mgid < 0) |
| 1123 | return -EINVAL; |
| 1124 | |
| 1125 | group = xa_load(&memory_groups, mgid); |
| 1126 | if (!group) |
| 1127 | return -EINVAL; |
| 1128 | if (!list_empty(&group->memory_blocks)) |
| 1129 | return -EBUSY; |
| 1130 | xa_erase(&memory_groups, mgid); |
| 1131 | kfree(group); |
| 1132 | return 0; |
| 1133 | } |
| 1134 | EXPORT_SYMBOL_GPL(memory_group_unregister); |
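
/*
 * Illustrative teardown sketch (hypothetical name): unregistration only
 * succeeds once the group's memory_blocks list is empty, so the memory is
 * offlined and removed first. offline_and_remove_memory() taking
 * (start, size) is an assumption from kernels of this era.
 */
static void example_remove_dimm(int mgid, u64 start, u64 size)
{
	if (!offline_and_remove_memory(start, size))
		memory_group_unregister(mgid);
}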
| 1135 | |
| 1136 | /* |
| 1137 | * This is an internal helper only to be used in core memory hotplug code to |
| 1138 | * lookup a memory group. We don't care about locking, as we don't expect a |
| 1139 | * memory group to get unregistered while adding memory to it -- because |
| 1140 |  * the group and the memory are managed by the same driver. |
| 1141 | */ |
| 1142 | struct memory_group *memory_group_find_by_id(int mgid) |
| 1143 | { |
| 1144 | return xa_load(&memory_groups, mgid); |
| 1145 | } |
David Hildenbrand | 3fcebf9 | 2021-09-07 19:55:48 -0700 | [diff] [blame] | 1146 | |
| 1147 | /* |
| 1148 | * This is an internal helper only to be used in core memory hotplug code to |
| 1149 | * walk all dynamic memory groups excluding a given memory group, either |
| 1150 | * belonging to a specific node, or belonging to any node. |
| 1151 | */ |
| 1152 | int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, |
| 1153 | struct memory_group *excluded, void *arg) |
| 1154 | { |
| 1155 | struct memory_group *group; |
| 1156 | unsigned long index; |
| 1157 | int ret = 0; |
| 1158 | |
| 1159 | xa_for_each_marked(&memory_groups, index, group, |
| 1160 | MEMORY_GROUP_MARK_DYNAMIC) { |
| 1161 | if (group == excluded) |
| 1162 | continue; |
| 1163 | #ifdef CONFIG_NUMA |
| 1164 | if (nid != NUMA_NO_NODE && group->nid != nid) |
| 1165 | continue; |
| 1166 | #endif /* CONFIG_NUMA */ |
| 1167 | ret = func(group, arg); |
| 1168 | if (ret) |
| 1169 | break; |
| 1170 | } |
| 1171 | return ret; |
| 1172 | } |
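
/*
 * Illustrative sketch (hypothetical names): sum the unit_pages of all
 * dynamic groups on a node, similar in spirit to how the auto-movable
 * onlining policy in mm/memory_hotplug.c accounts dynamic groups:
 *
 *	unsigned long pages = 0;
 *
 *	walk_dynamic_memory_groups(nid, sum_unit_pages_cb, NULL, &pages);
 */
static int sum_unit_pages_cb(struct memory_group *group, void *arg)
{
	*(unsigned long *)arg += group->d.unit_pages;
	return 0;	/* non-zero would stop the group walk */
}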