// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/topology.c
 *
 * Copyright (C) 2007 Paul Mundt
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/export.h>

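/* Per-CPU device objects, registered for each present CPU in topology_init(). */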
static DEFINE_PER_CPU(struct cpu, cpu_devices);

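/* Cached coregroup mask for each CPU, rebuilt by arch_update_cpu_topology(). */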
cpumask_t cpu_core_map[NR_CPUS];
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t cpu_coregroup_map(int cpu)
{
	/*
	 * Presently all SH-X3 SMP cores are multi-cores, so just keep it
	 * simple until we have a method for determining topology..
	 */
	return *cpu_possible_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_core_map[cpu];
}

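/*
 * Called when the scheduler rebuilds its domains; refresh the cached
 * coregroup mask for every possible CPU.
 */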
int arch_update_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);

	return 0;
}

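/*
 * Register the NUMA nodes and CPU devices with sysfs at boot so that
 * /sys/devices/system/{node,cpu} reflect the machine topology.
 */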
static int __init topology_init(void)
{
	int i, ret;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i)
		register_one_node(i);
#endif

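	/*
	 * Register a device for each present CPU and mark it hotpluggable
	 * so it can be brought online/offline through sysfs.
	 */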
	for_each_present_cpu(i) {
		struct cpu *c = &per_cpu(cpu_devices, i);

		c->hotpluggable = 1;

		ret = register_cpu(c, i);
		if (unlikely(ret))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/*
	 * In the UP case, make sure the CPU association is still
	 * registered under each node. Without this, sysfs fails
	 * to make the connection between nodes other than node0
	 * and cpu0.
	 */
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif

	return 0;
}
subsys_initcall(topology_init);