// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/loongarch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/vdso.h>

/*
 * Idle related variables and functions
 */

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

#ifdef CONFIG_HOTPLUG_CPU
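/* Called from the idle loop once this CPU has been marked offline. */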
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

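/* Low-level fork/thread return paths, implemented in assembly. */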
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

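/*
 * Prepare the user register context for a newly exec'd program: drop to user
 * privilege (PLV3), disable the FPU so that a clean FP context is set up on
 * first use, and load the new entry point and stack pointer.
 */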
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long crmd;
	unsigned long prmd;
	unsigned long euen;

	/* New thread loses kernel privileges. */
	crmd = regs->csr_crmd & ~(PLV_MASK);
	crmd |= PLV_USER;
	regs->csr_crmd = crmd;

	prmd = regs->csr_prmd & ~(PLV_MASK);
	prmd |= PLV_USER;
	regs->csr_prmd = prmd;

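	/* Disable the FPU for the new image; a fresh FP context is created lazily on first use. */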
	euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
	regs->csr_euen = euen;
	lose_fpu(0);

	clear_thread_flag(TIF_LSX_CTX_LIVE);
	clear_thread_flag(TIF_LASX_CTX_LIVE);
	clear_used_math();
	regs->csr_era = pc;
	regs->regs[3] = sp;
}

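/* No architecture-specific per-thread state needs to be released on thread exit. */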
void exit_thread(struct task_struct *tsk)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_fpu_owner())
		save_fp(current);

	preempt_enable();

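	/*
	 * The FP register save area is the tail of task_struct; if this task
	 * has never used the FPU there is nothing meaningful to copy beyond
	 * that point.
	 */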
	if (used_math())
		memcpy(dst, src, sizeof(struct task_struct));
	else
		memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));

	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
	unsigned long childksp;
	struct pt_regs *childregs, *regs = current_pt_regs();

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* Set up the child's exception frame at the top of its kernel stack. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
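	/* The child starts with the FPU/SIMD units disabled; other CSR state mirrors the current CPU. */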
	p->thread.csr_euen = 0;
	p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
	p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
	p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel thread */
		p->thread.reg23 = usp; /* fn */
		p->thread.reg24 = kthread_arg;
		p->thread.reg03 = childksp;
		p->thread.reg01 = (unsigned long) ret_from_kernel_thread;
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->csr_euen = p->thread.csr_euen;
		childregs->csr_crmd = p->thread.csr_crmd;
		childregs->csr_prmd = p->thread.csr_prmd;
		childregs->csr_ecfg = p->thread.csr_ecfg;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[4] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[3] = usp;

	p->thread.reg03 = (unsigned long) childregs;
	p->thread.reg01 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the FPU. This accelerates context
	 * switching for most programs since they don't use the FPU.
	 */
	childregs->csr_euen = 0;

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDSIMD);
	clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
	clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);

	if (clone_flags & CLONE_SETTLS)
		childregs->regs[2] = tls;

	return 0;
}

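/*
 * Without a kernel stack unwinder the blocking address of a sleeping task
 * cannot be determined, so report 0 ("unknown") for wchan.
 */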
unsigned long __get_wchan(struct task_struct *task)
{
	return 0;
}

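/*
 * Highest address usable for the user stack: just below TASK_SIZE, leaving
 * room for the vDSO and its data page, plus an optional randomization gap.
 */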
unsigned long stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	/* Space for the VDSO & data page */
	top -= PAGE_ALIGN(current->thread.vdso->size);
	top -= PAGE_SIZE;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & STACK_ALIGN;
}

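/*
 * Per-CPU call_single_data used to ask remote CPUs for a backtrace;
 * backtrace_csd_busy tracks CPUs whose previous request is still pending.
 */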
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

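/* IPI callback: dump this CPU's backtrace, then release its slot in the busy mask. */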
static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		csd->func = handle_backtrace;
		smp_call_function_single_async(cpu, csd);
	}
}

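/* Arch hook for the generic "backtrace all CPUs" machinery (e.g. sysrq-l). */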
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

#ifdef CONFIG_64BIT
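/* Flatten a pt_regs into the LOONGARCH_EF_*-indexed layout used by ptrace and core dumps. */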
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++)
		uregs[i] = regs->regs[i - LOONGARCH_EF_R0];

	uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
	uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
	uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
	uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
	uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
	uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
	uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
	uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
#endif /* CONFIG_64BIT */