// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}
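
/*
 * Example (illustrative): mhp_online_type_from_str("online_movable")
 * returns MMOP_ONLINE_MOVABLE; an unrecognized string returns -EINVAL.
 */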

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}
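
/*
 * Worked example (illustrative, x86-64-style numbers): with 128 MiB
 * sections and a 2 GiB memory block size, sections_per_block == 16.
 * PFN 0x100000 (the 4 GiB boundary with 4 KiB pages) lies in section
 * 32 (PAGES_PER_SECTION == 32768), which pfn_to_block_id() maps to
 * memory block 2.
 */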

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local radix tree to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

/*
 * Memory groups, indexed by memory group id (mgid).
 */
static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
#define MEMORY_GROUP_MARK_DYNAMIC	XA_MARK_1

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;

	return sysfs_emit(buf, "%08lx\n", phys_index);
}

/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	struct zone *zone;
	int ret;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
				  start_pfn, nr_pages);

	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at memory onlining/offlining
	 * stage helps to keep accounting easier to follow - e.g. vmemmap
	 * pages belong to the same zone as the memory they describe.
	 */
	if (nr_vmemmap_pages) {
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
		if (ret)
			return ret;
	}

	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone, mem->group);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		return ret;
	}

	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  nr_vmemmap_pages);

	mem->zone = zone;
	return ret;
}

static int memory_block_offline(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	int ret;

	if (!mem->zone)
		return -EINVAL;

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  -nr_vmemmap_pages);

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
			adjust_present_page_count(pfn_to_page(start_pfn),
						  mem->group, nr_vmemmap_pages);
		return ret;
	}

	if (nr_vmemmap_pages)
		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

	mem->zone = NULL;
	return ret;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;

	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, mem->start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem, to_state);
	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
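
/*
 * Example (sysfs, illustrative block number):
 *
 *	# cat /sys/devices/system/memory/memory32/state
 *	offline
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * Note that a positive return from device_online()/device_offline()
 * (e.g. the block is already in the requested state) is reported to
 * userspace as -EINVAL above.
 */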

/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * archs never exposed != 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      struct memory_group *group,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct memory_group *group = mem->group;
	struct zone *default_zone;
	int nid = mem->nid;
	int len = 0;

	/*
	 * Check the existing zone. Make sure that we do that only on
	 * online nodes; otherwise the page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * If !mem->zone, the memory block spans multiple zones and
		 * cannot get offlined.
		 */
		default_zone = mem->zone;
		if (!default_zone)
			return sysfs_emit(buf, "%s\n", "none");
		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
		goto out;
	}

	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
					  start_pfn, nr_pages);

	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
out:
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);
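
/*
 * Example (sysfs): the size is printed in hex without a "0x" prefix,
 * so a 128 MiB block size reads back as:
 *
 *	# cat /sys/devices/system/memory/block_size_bytes
 *	8000000
 */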

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);
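
/*
 * Example (sysfs): have memory blocks that get hotplugged default to
 * being onlined to ZONE_MOVABLE, using the same keywords as the
 * per-block "state" file:
 *
 *	# echo online_movable > /sys/devices/system/memory/auto_online_blocks
 */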

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}
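
/*
 * Example (sysfs, only with CONFIG_ARCH_MEMORY_PROBE; the address is
 * illustrative and must be aligned to the memory block size):
 *
 *	# echo 0x100000000 > /sys/devices/system/memory/probe
 */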
Dave Hansen3947be12005-10-29 18:16:54 -0700524
David Hildenbrand3f8e9172018-12-03 12:16:11 +0100525static DEVICE_ATTR_WO(probe);
Dave Hansen3947be12005-10-29 18:16:54 -0700526#endif
527
Andi Kleenfacb6012009-12-16 12:20:00 +0100528#ifdef CONFIG_MEMORY_FAILURE
529/*
530 * Support for offlining pages of memory
531 */
532
533/* Soft offline a page */
David Hildenbrand3f8e9172018-12-03 12:16:11 +0100534static ssize_t soft_offline_page_store(struct device *dev,
535 struct device_attribute *attr,
536 const char *buf, size_t count)
Andi Kleenfacb6012009-12-16 12:20:00 +0100537{
538 int ret;
539 u64 pfn;
540 if (!capable(CAP_SYS_ADMIN))
541 return -EPERM;
Jingoo Han34da5e62013-07-26 13:10:22 +0900542 if (kstrtoull(buf, 0, &pfn) < 0)
Andi Kleenfacb6012009-12-16 12:20:00 +0100543 return -EINVAL;
544 pfn >>= PAGE_SHIFT;
Naoya Horiguchifeec24a2019-11-30 17:53:38 -0800545 ret = soft_offline_page(pfn, 0);
Andi Kleenfacb6012009-12-16 12:20:00 +0100546 return ret == 0 ? count : ret;
547}
548
549/* Forcibly offline a page, including killing processes. */
David Hildenbrand3f8e9172018-12-03 12:16:11 +0100550static ssize_t hard_offline_page_store(struct device *dev,
551 struct device_attribute *attr,
552 const char *buf, size_t count)
Andi Kleenfacb6012009-12-16 12:20:00 +0100553{
554 int ret;
555 u64 pfn;
556 if (!capable(CAP_SYS_ADMIN))
557 return -EPERM;
Jingoo Han34da5e62013-07-26 13:10:22 +0900558 if (kstrtoull(buf, 0, &pfn) < 0)
Andi Kleenfacb6012009-12-16 12:20:00 +0100559 return -EINVAL;
560 pfn >>= PAGE_SHIFT;
Eric W. Biederman83b575312017-07-09 18:14:01 -0500561 ret = memory_failure(pfn, 0);
luofeid1fe1112022-03-22 14:44:38 -0700562 if (ret == -EOPNOTSUPP)
563 ret = 0;
Andi Kleenfacb6012009-12-16 12:20:00 +0100564 return ret ? ret : count;
565}
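
/*
 * Example (sysfs, illustrative address): both files take a physical
 * address, which is shifted down to a PFN above:
 *
 *	# echo 0x200000000 > /sys/devices/system/memory/soft_offline_page
 *	# echo 0x200000000 > /sys/devices/system/memory/hard_offline_page
 */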

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif

/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(unsigned long section_nr)
{
	unsigned long block_id = memory_block_id(section_nr);

	return find_memory_block_by_id(block_id);
}
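
/*
 * Illustrative caller pattern (hypothetical): the reference acquired
 * above must be dropped once the caller is done with the block:
 *
 *	mem = find_memory_block(section_nr);
 *	if (mem) {
 *		... use mem ...
 *		put_device(&mem->dev);
 *	}
 */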

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static const struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

static int __add_memory_block(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret)
		device_unregister(&memory->dev);

	return ret;
}

static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
						     int nid)
{
	const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *zone, *matching_zone = NULL;
	pg_data_t *pgdat = NODE_DATA(nid);
	int i;

	/*
	 * This logic only works for early memory, when the applicable zones
	 * already span the memory block. We don't expect overlapping zones on
	 * a single node for early memory. So if we're told that some PFNs
	 * of a node fall into this memory block, we can assume that all node
	 * zones that intersect with the memory block are actually applicable.
	 * No need to look at the memmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		if (!populated_zone(zone))
			continue;
		if (!zone_intersects(zone, start_pfn, nr_pages))
			continue;
		if (!matching_zone) {
			matching_zone = zone;
			continue;
		}
		/* Spans multiple zones ... */
		matching_zone = NULL;
		break;
	}
	return matching_zone;
}

#ifdef CONFIG_NUMA
/**
 * memory_block_add_nid() - Indicate that system RAM falling into this memory
 *			    block device (partially) belongs to the given node.
 * @mem: The memory block device.
 * @nid: The node id.
 * @context: The memory initialization context.
 *
 * Indicate that system RAM falling into this memory block (partially) belongs
 * to the given node. If the context indicates ("early") that we are adding the
 * node during node device subsystem initialization, this will also properly
 * set/adjust mem->zone based on the zone ranges of the given node.
 */
void memory_block_add_nid(struct memory_block *mem, int nid,
			  enum meminit_context context)
{
	if (context == MEMINIT_EARLY && mem->nid != nid) {
		/*
		 * For early memory we have to determine the zone when setting
		 * the node id and handle multiple nodes spanning a single
		 * memory block by indicating via zone == NULL that we're not
		 * dealing with a single zone. So if we're setting the node id
		 * the first time, determine if there is a single zone. If
		 * we're setting the node id a second time to a different node,
		 * invalidate the single detected zone.
		 */
		if (mem->nid == NUMA_NO_NODE)
			mem->zone = early_node_zone_for_memory_block(mem, nid);
		else
			mem->zone = NULL;
	}

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node. If we span multiple nodes (not applicable
	 * to hotplugged memory), zone == NULL will prohibit memory offlining
	 * and consequently unplug.
	 */
	mem->nid = nid;
}
#endif

static int add_memory_block(unsigned long block_id, unsigned long state,
			    unsigned long nr_vmemmap_pages,
			    struct memory_group *group)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;
	mem->nr_vmemmap_pages = nr_vmemmap_pages;
	INIT_LIST_HEAD(&mem->group_next);

#ifndef CONFIG_NUMA
	if (state == MEM_ONLINE)
		/*
		 * MEM_ONLINE at this point implies early memory. With NUMA,
		 * we'll determine the zone when setting the node id via
		 * memory_block_add_nid(). Memory hotplug updates the zone
		 * manually when memory onlining/offlining succeeds.
		 */
751 mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
752#endif /* CONFIG_NUMA */
753
David Hildenbrand2aa065f2022-03-22 14:47:34 -0700754 ret = __add_memory_block(mem);
David Hildenbrand7ea0d2d2022-03-22 14:47:09 -0700755 if (ret)
756 return ret;
757
David Hildenbrand028fc572021-09-07 19:55:26 -0700758 if (group) {
759 mem->group = group;
760 list_add(&mem->group_next, &group->memory_blocks);
761 }
Nathan Fontenote4619c82010-10-19 12:44:20 -0500762
David Hildenbrand7ea0d2d2022-03-22 14:47:09 -0700763 return 0;
Nathan Fontenot0c2c99b2011-01-20 10:43:34 -0600764}
765
David Hildenbrand2aa065f2022-03-22 14:47:34 -0700766static int __init add_boot_memory_block(unsigned long base_section_nr)
Nathan Fontenot0c2c99b2011-01-20 10:43:34 -0600767{
David Hildenbrand68c3a6a2020-04-06 20:06:40 -0700768 int section_count = 0;
David Hildenbrand2491f0a2019-07-18 15:57:37 -0700769 unsigned long nr;
Nathan Fontenot0c2c99b2011-01-20 10:43:34 -0600770
David Hildenbrand2491f0a2019-07-18 15:57:37 -0700771 for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
772 nr++)
773 if (present_section_nr(nr))
David Hildenbrand18115822019-07-18 15:56:46 -0700774 section_count++;
Nathan Fontenot0c2c99b2011-01-20 10:43:34 -0600775
Seth Jenningscb5e39b2013-08-20 12:13:03 -0500776 if (section_count == 0)
777 return 0;
David Hildenbrand2aa065f2022-03-22 14:47:34 -0700778 return add_memory_block(memory_block_id(base_section_nr),
779 MEM_ONLINE, 0, NULL);
Nathan Fontenote4619c82010-10-19 12:44:20 -0500780}
781
David Hildenbrand2aa065f2022-03-22 14:47:34 -0700782static int add_hotplug_memory_block(unsigned long block_id,
783 unsigned long nr_vmemmap_pages,
784 struct memory_group *group)
785{
786 return add_memory_block(block_id, MEM_OFFLINE, nr_vmemmap_pages, group);
787}
788
789static void remove_memory_block(struct memory_block *memory)
David Rientjes4edd7ce2013-04-29 15:08:22 -0700790{
David Hildenbranddb051a02019-07-18 15:56:56 -0700791 if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
792 return;
David Rientjes4edd7ce2013-04-29 15:08:22 -0700793
Scott Cheloha4fb6eab2020-06-03 16:03:48 -0700794 WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
795
David Hildenbrand028fc572021-09-07 19:55:26 -0700796 if (memory->group) {
797 list_del(&memory->group_next);
798 memory->group = NULL;
799 }
800
David Hildenbrandcb7b3a32019-05-13 17:21:37 -0700801 /* drop the ref. we got via find_memory_block() */
Seth Jenningsdf2b7172013-08-20 12:12:59 -0500802 put_device(&memory->dev);
David Rientjes4edd7ce2013-04-29 15:08:22 -0700803 device_unregister(&memory->dev);
804}
805
David Hildenbranddb051a02019-07-18 15:56:56 -0700806/*
807 * Create memory block devices for the given memory area. Start and size
808 * have to be aligned to memory block granularity. Memory block devices
809 * will be initialized as offline.
David Hildenbrand848e19a2019-11-30 17:54:14 -0800810 *
811 * Called under device_hotplug_lock.
David Hildenbranddb051a02019-07-18 15:56:56 -0700812 */
Oscar Salvadora08a2ae2021-05-04 18:39:42 -0700813int create_memory_block_devices(unsigned long start, unsigned long size,
David Hildenbrand028fc572021-09-07 19:55:26 -0700814 unsigned long vmemmap_pages,
815 struct memory_group *group)
David Hildenbranddb051a02019-07-18 15:56:56 -0700816{
David Hildenbrand90ec010f2019-07-18 15:57:40 -0700817 const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
818 unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
David Hildenbranddb051a02019-07-18 15:56:56 -0700819 struct memory_block *mem;
820 unsigned long block_id;
821 int ret = 0;
822
823 if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
824 !IS_ALIGNED(size, memory_block_size_bytes())))
825 return -EINVAL;
826
David Hildenbranddb051a02019-07-18 15:56:56 -0700827 for (block_id = start_block_id; block_id != end_block_id; block_id++) {
David Hildenbrand2aa065f2022-03-22 14:47:34 -0700828 ret = add_hotplug_memory_block(block_id, vmemmap_pages, group);
David Hildenbranddb051a02019-07-18 15:56:56 -0700829 if (ret)
830 break;
David Hildenbranddb051a02019-07-18 15:56:56 -0700831 }
832 if (ret) {
833 end_block_id = block_id;
834 for (block_id = start_block_id; block_id != end_block_id;
835 block_id++) {
David Hildenbranddd625282019-07-18 15:57:53 -0700836 mem = find_memory_block_by_id(block_id);
David Hildenbrand848e19a2019-11-30 17:54:14 -0800837 if (WARN_ON_ONCE(!mem))
838 continue;
David Hildenbrand2aa065f2022-03-22 14:47:34 -0700839 remove_memory_block(mem);
David Hildenbranddb051a02019-07-18 15:56:56 -0700840 }
841 }
David Hildenbranddb051a02019-07-18 15:56:56 -0700842 return ret;
843}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		unregister_memory_block_under_nodes(mem);
		remove_memory_block(mem);
	}
}

/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_boot_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}
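
/*
 * Minimal usage sketch (the callback name is hypothetical): count the
 * memory blocks overlapping a range:
 *
 *	static int count_blocks_cb(struct memory_block *mem, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr_blocks = 0;
 *
 *	walk_memory_blocks(start, size, &nr_blocks, count_blocks_cb);
 */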

struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}

/*
 * This is an internal helper to unify allocation and initialization of
 * memory groups. Note that the passed memory group will be copied to a
 * dynamically allocated memory group. After this call, the passed
 * memory group should no longer be used.
 */
static int memory_group_register(struct memory_group group)
{
	struct memory_group *new_group;
	uint32_t mgid;
	int ret;

	if (!node_possible(group.nid))
		return -EINVAL;

	new_group = kzalloc(sizeof(group), GFP_KERNEL);
	if (!new_group)
		return -ENOMEM;
	*new_group = group;
	INIT_LIST_HEAD(&new_group->memory_blocks);

	ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
		       GFP_KERNEL);
	if (ret) {
		kfree(new_group);
		return ret;
	} else if (group.is_dynamic) {
		xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
	}
	return mgid;
}

/**
 * memory_group_register_static() - Register a static memory group.
 * @nid: The node id.
 * @max_pages: The maximum number of pages we'll have in this static memory
 *	       group.
 *
 * Register a new static memory group and return the memory group id.
 * All memory in the group belongs to a single unit, such as a DIMM. All
 * memory belonging to a static memory group is added in one go to be removed
 * in one go -- it's static.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
 * returns the new memory group id.
 */
int memory_group_register_static(int nid, unsigned long max_pages)
{
	struct memory_group group = {
		.nid = nid,
		.s = {
			.max_pages = max_pages,
		},
	};

	if (!max_pages)
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_static);
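
/*
 * Usage sketch (hypothetical driver hot-adding one DIMM-like range;
 * MHP_NID_IS_MGID makes add_memory() treat the first argument as a
 * memory group id rather than a node id):
 *
 *	mgid = memory_group_register_static(nid, PHYS_PFN(size));
 *	if (mgid < 0)
 *		return mgid;
 *	rc = add_memory(mgid, start, size, MHP_NID_IS_MGID);
 */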

/**
 * memory_group_register_dynamic() - Register a dynamic memory group.
 * @nid: The node id.
 * @unit_pages: Unit in pages in which memory is added/removed in this
 *		dynamic memory group.
 *
 * Register a new dynamic memory group and return the memory group id.
 * Memory within a dynamic memory group is added/removed dynamically
 * in unit_pages.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if unit_pages is invalid (0, not a
 * power of two, smaller than a single memory block). Otherwise, returns the
 * new memory group id.
 */
int memory_group_register_dynamic(int nid, unsigned long unit_pages)
{
	struct memory_group group = {
		.nid = nid,
		.is_dynamic = true,
		.d = {
			.unit_pages = unit_pages,
		},
	};

	if (!unit_pages || !is_power_of_2(unit_pages) ||
	    unit_pages < PHYS_PFN(memory_block_size_bytes()))
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_dynamic);
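
/*
 * Usage sketch (hypothetical, virtio-mem-style): a driver that plugs
 * memory in fixed-size units registers one dynamic group up front and
 * then adds/removes memory in multiples of unit_pages:
 *
 *	mgid = memory_group_register_dynamic(nid, PHYS_PFN(unit_size));
 */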

/**
 * memory_group_unregister() - Unregister a memory group.
 * @mgid: the memory group id
 *
 * Unregister a memory group. If any memory block still belongs to this
 * memory group, unregistering will fail.
 *
 * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
 * memory blocks still belong to this memory group and returns 0 if
 * unregistering succeeded.
 */
int memory_group_unregister(int mgid)
{
	struct memory_group *group;

	if (mgid < 0)
		return -EINVAL;

	group = xa_load(&memory_groups, mgid);
	if (!group)
		return -EINVAL;
	if (!list_empty(&group->memory_blocks))
		return -EBUSY;
	xa_erase(&memory_groups, mgid);
	kfree(group);
	return 0;
}
EXPORT_SYMBOL_GPL(memory_group_unregister);

/*
 * This is an internal helper only to be used in core memory hotplug code to
 * lookup a memory group. We don't care about locking, as we don't expect a
 * memory group to get unregistered while adding memory to it -- because
 * the group and the memory is managed by the same driver.
 */
struct memory_group *memory_group_find_by_id(int mgid)
{
	return xa_load(&memory_groups, mgid);
}

/*
 * This is an internal helper only to be used in core memory hotplug code to
 * walk all dynamic memory groups excluding a given memory group, either
 * belonging to a specific node, or belonging to any node.
 */
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
			       struct memory_group *excluded, void *arg)
{
	struct memory_group *group;
	unsigned long index;
	int ret = 0;

	xa_for_each_marked(&memory_groups, index, group,
			   MEMORY_GROUP_MARK_DYNAMIC) {
		if (group == excluded)
			continue;
#ifdef CONFIG_NUMA
		if (nid != NUMA_NO_NODE && group->nid != nid)
			continue;
#endif /* CONFIG_NUMA */
		ret = func(group, arg);
		if (ret)
			break;
	}
	return ret;
}