| /* |
| * SMP support for ppc. |
| * |
| * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great |
| * deal of code from the sparc and intel versions. |
| * |
| * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> |
| * |
| * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and |
| * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com |
| * |
| * This program is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU General Public License |
| * as published by the Free Software Foundation; either version |
| * 2 of the License, or (at your option) any later version. |
| */ |
| |
| #undef DEBUG |
| |
| #include <linux/kernel.h> |
| #include <linux/module.h> |
| #include <linux/sched.h> |
| #include <linux/smp.h> |
| #include <linux/interrupt.h> |
| #include <linux/delay.h> |
| #include <linux/init.h> |
| #include <linux/spinlock.h> |
| #include <linux/cache.h> |
| #include <linux/err.h> |
| #include <linux/sysdev.h> |
| #include <linux/cpu.h> |
| #include <linux/notifier.h> |
| #include <linux/topology.h> |
| |
| #include <asm/ptrace.h> |
| #include <asm/atomic.h> |
| #include <asm/irq.h> |
| #include <asm/page.h> |
| #include <asm/pgtable.h> |
| #include <asm/prom.h> |
| #include <asm/smp.h> |
| #include <asm/time.h> |
| #include <asm/machdep.h> |
| #include <asm/cputable.h> |
| #include <asm/system.h> |
| #include <asm/mpic.h> |
| #include <asm/vdso_datapage.h> |
| #ifdef CONFIG_PPC64 |
| #include <asm/paca.h> |
| #endif |
| |
| #ifdef DEBUG |
| #include <asm/udbg.h> |
| #define DBG(fmt...) udbg_printf(fmt) |
| #else |
| #define DBG(fmt...) |
| #endif |
| |
| int smp_hw_index[NR_CPUS]; |
| struct thread_info *secondary_ti; |
| |
| cpumask_t cpu_possible_map = CPU_MASK_NONE; |
| cpumask_t cpu_online_map = CPU_MASK_NONE; |
| DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; |
| |
| EXPORT_SYMBOL(cpu_online_map); |
| EXPORT_SYMBOL(cpu_possible_map); |
| EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
| |
| /* SMP operations for this machine */ |
| struct smp_ops_t *smp_ops; |
| |
| static volatile unsigned int cpu_callin_map[NR_CPUS]; |
| |
| void smp_call_function_interrupt(void); |
| |
| int smt_enabled_at_boot = 1; |
| |
| static int ipi_fail_ok; |
| |
| static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; |
| |
| #ifdef CONFIG_PPC64 |
| void __devinit smp_generic_kick_cpu(int nr) |
| { |
| BUG_ON(nr < 0 || nr >= NR_CPUS); |
| |
| /* |
| * The processor is currently spinning, waiting for the |
| * cpu_start field to become non-zero After we set cpu_start, |
| * the processor will continue on to secondary_start |
| */ |
| paca[nr].cpu_start = 1; |
| smp_mb(); |
| } |
| #endif |
| |
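/*
 * Dispatch an incoming IPI message.  The platform's IPI interrupt
 * handler is expected to call this with one of the PPC_MSG_* values
 * defined in asm/smp.h.
 */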
| void smp_message_recv(int msg) |
| { |
	switch (msg) {
| case PPC_MSG_CALL_FUNCTION: |
| smp_call_function_interrupt(); |
| break; |
| case PPC_MSG_RESCHEDULE: |
| /* XXX Do we have to do this? */ |
| set_need_resched(); |
| break; |
| case PPC_MSG_DEBUGGER_BREAK: |
| if (crash_ipi_function_ptr) { |
| crash_ipi_function_ptr(get_irq_regs()); |
| break; |
| } |
| #ifdef CONFIG_DEBUGGER |
| debugger_ipi(get_irq_regs()); |
| break; |
| #endif /* CONFIG_DEBUGGER */ |
| /* FALLTHROUGH */ |
| default: |
| printk("SMP %d: smp_message_recv(): unknown msg %d\n", |
| smp_processor_id(), msg); |
| break; |
| } |
| } |
| |
| void smp_send_reschedule(int cpu) |
| { |
| if (likely(smp_ops)) |
| smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); |
| } |
| |
| #ifdef CONFIG_DEBUGGER |
| void smp_send_debugger_break(int cpu) |
| { |
| if (likely(smp_ops)) |
| smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); |
| } |
| #endif |
| |
| #ifdef CONFIG_KEXEC |
| void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) |
| { |
| crash_ipi_function_ptr = crash_ipi_callback; |
| if (crash_ipi_callback && smp_ops) { |
| mb(); |
| smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK); |
| } |
| } |
| #endif |
| |
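/* IPI callback: park the calling CPU with interrupts off (see
 * smp_send_stop()).
 */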
| static void stop_this_cpu(void *dummy) |
| { |
| local_irq_disable(); |
| while (1) |
| ; |
| } |
| |
| /* |
| * Structure and data for smp_call_function(). This is designed to minimise |
| * static memory requirements. It also looks cleaner. |
| * Stolen from the i386 version. |
| */ |
| static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); |
| |
| static struct call_data_struct { |
| void (*func) (void *info); |
| void *info; |
| atomic_t started; |
| atomic_t finished; |
| int wait; |
| } *call_data; |
| |
| /* delay of at least 8 seconds */ |
| #define SMP_CALL_TIMEOUT 8 |
| |
| /* |
| * These functions send a 'generic call function' IPI to other online |
| * CPUS in the system. |
| * |
| * [SUMMARY] Run a function on other CPUs. |
| * <func> The function to run. This must be fast and non-blocking. |
| * <info> An arbitrary pointer to pass to the function. |
| * <nonatomic> currently unused. |
| * <wait> If true, wait (atomically) until function has completed on other CPUs. |
 * [RETURNS] 0 on success, else a negative status code. Does not return
 * until remote CPUs are nearly ready to execute <func> or have already
 * executed it.
 * <map> is the cpumask of CPUs to send the IPI to.
| * |
| * You must not call this function with disabled interrupts or from a |
| * hardware interrupt handler or from a bottom half handler. |
| */ |
| static int __smp_call_function_map(void (*func) (void *info), void *info, |
| int nonatomic, int wait, cpumask_t map) |
| { |
| struct call_data_struct data; |
| int ret = -1, num_cpus; |
| int cpu; |
| u64 timeout; |
| |
| if (unlikely(smp_ops == NULL)) |
| return ret; |
| |
| data.func = func; |
| data.info = info; |
| atomic_set(&data.started, 0); |
| data.wait = wait; |
| if (wait) |
| atomic_set(&data.finished, 0); |
| |
| /* remove 'self' from the map */ |
| if (cpu_isset(smp_processor_id(), map)) |
| cpu_clear(smp_processor_id(), map); |
| |
| /* sanity check the map, remove any non-online processors. */ |
| cpus_and(map, map, cpu_online_map); |
| |
| num_cpus = cpus_weight(map); |
| if (!num_cpus) |
| goto done; |
| |
| call_data = &data; |
| smp_wmb(); |
| /* Send a message to all CPUs in the map */ |
| for_each_cpu_mask(cpu, map) |
| smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); |
| |
| timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec; |
| |
| /* Wait for indication that they have received the message */ |
| while (atomic_read(&data.started) != num_cpus) { |
| HMT_low(); |
| if (get_tb() >= timeout) { |
| printk("smp_call_function on cpu %d: other cpus not " |
| "responding (%d)\n", smp_processor_id(), |
| atomic_read(&data.started)); |
| if (!ipi_fail_ok) |
| debugger(NULL); |
| goto out; |
| } |
| } |
| |
| /* optionally wait for the CPUs to complete */ |
| if (wait) { |
| while (atomic_read(&data.finished) != num_cpus) { |
| HMT_low(); |
| if (get_tb() >= timeout) { |
| printk("smp_call_function on cpu %d: other " |
| "cpus not finishing (%d/%d)\n", |
| smp_processor_id(), |
| atomic_read(&data.finished), |
| atomic_read(&data.started)); |
| debugger(NULL); |
| goto out; |
| } |
| } |
| } |
| |
| done: |
| ret = 0; |
| |
| out: |
| call_data = NULL; |
| HMT_medium(); |
| return ret; |
| } |
| |
| static int __smp_call_function(void (*func)(void *info), void *info, |
| int nonatomic, int wait) |
| { |
| int ret; |
| spin_lock(&call_lock); |
	ret = __smp_call_function_map(func, info, nonatomic, wait,
				      cpu_online_map);
| spin_unlock(&call_lock); |
| return ret; |
| } |
| |
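/*
 * Minimal usage sketch (do_flush is a hypothetical callback; it must
 * be fast and non-blocking):
 *
 *	static void do_flush(void *info) { ... }
 *
 *	smp_call_function(do_flush, NULL, 0, 1);
 *
 * Passing wait == 1 blocks until every other online CPU has finished
 * running do_flush().
 */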
| int smp_call_function(void (*func) (void *info), void *info, int nonatomic, |
| int wait) |
| { |
| /* Can deadlock when called with interrupts disabled */ |
| WARN_ON(irqs_disabled()); |
| |
| return __smp_call_function(func, info, nonatomic, wait); |
| } |
| EXPORT_SYMBOL(smp_call_function); |
| |
| int smp_call_function_single(int cpu, void (*func) (void *info), void *info, |
| int nonatomic, int wait) |
| { |
| cpumask_t map = CPU_MASK_NONE; |
| int ret = 0; |
| |
| /* Can deadlock when called with interrupts disabled */ |
| WARN_ON(irqs_disabled()); |
| |
| if (!cpu_online(cpu)) |
| return -EINVAL; |
| |
| cpu_set(cpu, map); |
| if (cpu != get_cpu()) { |
| spin_lock(&call_lock); |
| ret = __smp_call_function_map(func, info, nonatomic, wait, map); |
| spin_unlock(&call_lock); |
| } else { |
| local_irq_disable(); |
| func(info); |
| local_irq_enable(); |
| } |
| put_cpu(); |
| return ret; |
| } |
| EXPORT_SYMBOL(smp_call_function_single); |
| |
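/* Ask all other online CPUs to spin with interrupts off; called on
 * panic/reboot paths, so a failed IPI is tolerated (see ipi_fail_ok).
 */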
| void smp_send_stop(void) |
| { |
| int nolock; |
| |
| /* It's OK to fail sending the IPI, since the alternative is to |
| * be stuck forever waiting on the other CPU to take the interrupt. |
| * |
| * It's better to at least continue and go through reboot, since this |
| * function is usually called at panic or reboot time in the first |
| * place. |
| */ |
| ipi_fail_ok = 1; |
| |
| /* Don't deadlock in case we got called through panic */ |
| nolock = !spin_trylock(&call_lock); |
| __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map); |
| if (!nolock) |
| spin_unlock(&call_lock); |
| } |
| |
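/*
 * Receiving side of PPC_MSG_CALL_FUNCTION: runs in interrupt context
 * on each CPU targeted by __smp_call_function_map().
 */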
| void smp_call_function_interrupt(void) |
| { |
| void (*func) (void *info); |
| void *info; |
| int wait; |
| |
| /* call_data will be NULL if the sender timed out while |
| * waiting on us to receive the call. |
| */ |
| if (!call_data) |
| return; |
| |
| func = call_data->func; |
| info = call_data->info; |
| wait = call_data->wait; |
| |
| if (!wait) |
| smp_mb__before_atomic_inc(); |
| |
| /* |
| * Notify initiating CPU that I've grabbed the data and am |
| * about to execute the function |
| */ |
| atomic_inc(&call_data->started); |
| /* |
| * At this point the info structure may be out of scope unless wait==1 |
| */ |
| (*func)(info); |
| if (wait) { |
| smp_mb__before_atomic_inc(); |
| atomic_inc(&call_data->finished); |
| } |
| } |
| |
| extern struct gettimeofday_struct do_gtod; |
| |
| struct thread_info *current_set[NR_CPUS]; |
| |
| DECLARE_PER_CPU(unsigned int, pvr); |
| |
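/* Record this CPU's processor version register (PVR) for later
 * reporting, e.g. in /proc/cpuinfo.
 */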
| static void __devinit smp_store_cpu_info(int id) |
| { |
| per_cpu(pvr, id) = mfspr(SPRN_PVR); |
| } |
| |
| static void __init smp_create_idle(unsigned int cpu) |
| { |
| struct task_struct *p; |
| |
| /* create a process for the processor */ |
| p = fork_idle(cpu); |
| if (IS_ERR(p)) |
| panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); |
| #ifdef CONFIG_PPC64 |
| paca[cpu].__current = p; |
| #endif |
| current_set[cpu] = task_thread_info(p); |
| task_thread_info(p)->cpu = cpu; |
| } |
| |
| void __init smp_prepare_cpus(unsigned int max_cpus) |
| { |
| unsigned int cpu; |
| |
| DBG("smp_prepare_cpus\n"); |
| |
| /* |
| * setup_cpu may need to be called on the boot cpu. We havent |
| * spun any cpus up but lets be paranoid. |
| */ |
| BUG_ON(boot_cpuid != smp_processor_id()); |
| |
| /* Fixup boot cpu */ |
| smp_store_cpu_info(boot_cpuid); |
| cpu_callin_map[boot_cpuid] = 1; |
| |
| if (smp_ops) |
| max_cpus = smp_ops->probe(); |
| else |
| max_cpus = 1; |
| |
| smp_space_timers(max_cpus); |
| |
| for_each_possible_cpu(cpu) |
| if (cpu != boot_cpuid) |
| smp_create_idle(cpu); |
| } |
| |
| void __devinit smp_prepare_boot_cpu(void) |
| { |
| BUG_ON(smp_processor_id() != boot_cpuid); |
| |
| cpu_set(boot_cpuid, cpu_online_map); |
| #ifdef CONFIG_PPC64 |
| paca[boot_cpuid].__current = current; |
| #endif |
| current_set[boot_cpuid] = task_thread_info(current); |
| } |
| |
| #ifdef CONFIG_HOTPLUG_CPU |
| /* State of each CPU during hotplug phases */ |
| DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
| |
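/* Runs on the CPU being offlined: remove it from the online map and,
 * on ppc64, migrate its interrupts to the remaining online CPUs.
 */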
| int generic_cpu_disable(void) |
| { |
| unsigned int cpu = smp_processor_id(); |
| |
| if (cpu == boot_cpuid) |
| return -EBUSY; |
| |
| cpu_clear(cpu, cpu_online_map); |
| #ifdef CONFIG_PPC64 |
| vdso_data->processorCount--; |
| fixup_irqs(cpu_online_map); |
| #endif |
| return 0; |
| } |
| |
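/* Bring a CPU parked in generic_mach_cpu_die() back online by moving
 * it to CPU_UP_PREPARE and waiting for it to reappear in the online
 * map.
 */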
| int generic_cpu_enable(unsigned int cpu) |
| { |
| /* Do the normal bootup if we haven't |
| * already bootstrapped. */ |
| if (system_state != SYSTEM_RUNNING) |
| return -ENOSYS; |
| |
	/* get the target out of its holding state */
| per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
| smp_wmb(); |
| |
| while (!cpu_online(cpu)) |
| cpu_relax(); |
| |
| #ifdef CONFIG_PPC64 |
| fixup_irqs(cpu_online_map); |
| /* counter the irq disable in fixup_irqs */ |
| local_irq_enable(); |
| #endif |
| return 0; |
| } |
| |
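/* Poll for up to ~10 seconds (100 x 100ms) for the dying CPU to
 * advertise itself as CPU_DEAD.
 */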
| void generic_cpu_die(unsigned int cpu) |
| { |
| int i; |
| |
| for (i = 0; i < 100; i++) { |
| smp_rmb(); |
| if (per_cpu(cpu_state, cpu) == CPU_DEAD) |
| return; |
| msleep(100); |
| } |
| printk(KERN_ERR "CPU%d didn't die...\n", cpu); |
| } |
| |
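/* Executed by the dying CPU itself: mark this CPU as CPU_DEAD, then
 * spin until generic_cpu_enable() flips cpu_state back to
 * CPU_UP_PREPARE to wake us.
 */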
| void generic_mach_cpu_die(void) |
| { |
| unsigned int cpu; |
| |
| local_irq_disable(); |
| cpu = smp_processor_id(); |
| printk(KERN_DEBUG "CPU%d offline\n", cpu); |
| __get_cpu_var(cpu_state) = CPU_DEAD; |
| smp_wmb(); |
| while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) |
| cpu_relax(); |
| cpu_set(cpu, cpu_online_map); |
| local_irq_enable(); |
| } |
| #endif |
| |
| static int __devinit cpu_enable(unsigned int cpu) |
| { |
| if (smp_ops && smp_ops->cpu_enable) |
| return smp_ops->cpu_enable(cpu); |
| |
| return -ENOSYS; |
| } |
| |
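/* Bring up one secondary CPU: hand it its thread_info, kick it, then
 * wait for it to call in and appear in the online map.
 */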
| int __cpuinit __cpu_up(unsigned int cpu) |
| { |
| int c; |
| |
| secondary_ti = current_set[cpu]; |
| if (!cpu_enable(cpu)) |
| return 0; |
| |
| if (smp_ops == NULL || |
| (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) |
| return -EINVAL; |
| |
	/* Make sure the callin-map entry is 0 (it can be left over from
	 * a previous CPU hotplug).
	 */
| cpu_callin_map[cpu] = 0; |
| |
| /* The information for processor bringup must |
| * be written out to main store before we release |
| * the processor. |
| */ |
| smp_mb(); |
| |
| /* wake up cpus */ |
| DBG("smp: kicking cpu %d\n", cpu); |
| smp_ops->kick_cpu(cpu); |
| |
| /* |
| * wait to see if the cpu made a callin (is actually up). |
| * use this value that I found through experimentation. |
| * -- Cort |
| */ |
| if (system_state < SYSTEM_RUNNING) |
| for (c = 50000; c && !cpu_callin_map[cpu]; c--) |
| udelay(100); |
| #ifdef CONFIG_HOTPLUG_CPU |
| else |
| /* |
| * CPUs can take much longer to come up in the |
| * hotplug case. Wait five seconds. |
| */ |
| for (c = 25; c && !cpu_callin_map[cpu]; c--) { |
| msleep(200); |
| } |
| #endif |
| |
	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk(KERN_INFO "Processor %u found.\n", cpu);
| |
| if (smp_ops->give_timebase) |
| smp_ops->give_timebase(); |
| |
| /* Wait until cpu puts itself in the online map */ |
| while (!cpu_online(cpu)) |
| cpu_relax(); |
| |
| return 0; |
| } |
| |
| |
| /* Activate a secondary processor. */ |
| int __devinit start_secondary(void *unused) |
| { |
| unsigned int cpu = smp_processor_id(); |
| |
| atomic_inc(&init_mm.mm_count); |
| current->active_mm = &init_mm; |
| |
| smp_store_cpu_info(cpu); |
| set_dec(tb_ticks_per_jiffy); |
| preempt_disable(); |
| cpu_callin_map[cpu] = 1; |
| |
| smp_ops->setup_cpu(cpu); |
| if (smp_ops->take_timebase) |
| smp_ops->take_timebase(); |
| |
| if (system_state > SYSTEM_BOOTING) |
| snapshot_timebase(); |
| |
| secondary_cpu_time_init(); |
| |
| spin_lock(&call_lock); |
| cpu_set(cpu, cpu_online_map); |
| spin_unlock(&call_lock); |
| |
| local_irq_enable(); |
| |
| cpu_idle(); |
| return 0; |
| } |
| |
| int setup_profiling_timer(unsigned int multiplier) |
| { |
| return 0; |
| } |
| |
| void __init smp_cpus_done(unsigned int max_cpus) |
| { |
| cpumask_t old_mask; |
| |
	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
| old_mask = current->cpus_allowed; |
| set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid)); |
| |
| if (smp_ops) |
| smp_ops->setup_cpu(boot_cpuid); |
| |
| set_cpus_allowed(current, old_mask); |
| |
| snapshot_timebases(); |
| |
| dump_numa_cpu_topology(); |
| } |
| |
| #ifdef CONFIG_HOTPLUG_CPU |
| int __cpu_disable(void) |
| { |
| if (smp_ops->cpu_disable) |
| return smp_ops->cpu_disable(); |
| |
| return -ENOSYS; |
| } |
| |
| void __cpu_die(unsigned int cpu) |
| { |
| if (smp_ops->cpu_die) |
| smp_ops->cpu_die(cpu); |
| } |
| #endif |