// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support for RISC-V
 *
 * Copyright (C) 2023 StarFive Technology Co., Ltd.
 *
 * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
 */

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include <linux/cpu.h>
#include <linux/memblock.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

/* The logical cpu number we should resume on, initialised to a non-cpu number. */
static int sleep_cpu = -EINVAL;

/* Pointer to the temporary resume page table. */
static pgd_t *resume_pg_dir;

/* CPU context to be saved. */
struct suspend_context *hibernate_cpu_context;
EXPORT_SYMBOL_GPL(hibernate_cpu_context);

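/*
 * Address of the page holding the copy of hibernate_core_restore_code; the
 * assembly restore path jumps to this copy so that the restore code cannot
 * overwrite itself while the image pages are being copied back.
 */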
unsigned long relocated_restore_code;
EXPORT_SYMBOL_GPL(relocated_restore_code);

/**
 * struct arch_hibernate_hdr_invariants - container to store kernel build version.
 * @uts_version: to save the build number and date so that we do not resume with
 *		 a different kernel.
 */
struct arch_hibernate_hdr_invariants {
	char uts_version[__NEW_UTS_LEN + 1];
};

/**
 * struct arch_hibernate_hdr - helper parameters saved to the hibernation image header.
 * @invariants: container to store the kernel build version.
 * @hartid: to ensure the same boot CPU executes the hibernate/restore code.
 * @saved_satp: the original page table used by the hibernated image.
 * @restore_cpu_addr: the kernel's image address to restore the CPU context.
 */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;
	unsigned long hartid;
	unsigned long saved_satp;
	unsigned long restore_cpu_addr;
} resume_hdr;

static void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

/*
 * Check if the given pfn is in the 'nosave' section.
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn));
}
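
/*
 * The CSR state is saved and restored in swsusp_arch_suspend() via
 * suspend_save_csrs()/suspend_restore_csrs(), so these generic hooks have
 * nothing to do.
 */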
void notrace save_processor_state(void)
{
}

void notrace restore_processor_state(void)
{
}

/*
 * Save the helper parameters to the hibernation image header.
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);

	hdr->hartid = cpuid_to_hartid_map(sleep_cpu);
	hdr->saved_satp = csr_read(CSR_SATP);
	hdr->restore_cpu_addr = (unsigned long)__hibernate_cpu_resume;

	return 0;
}
EXPORT_SYMBOL_GPL(arch_hibernation_header_save);

/*
 * Retrieve the helper parameters from the hibernation image header.
 */
int arch_hibernation_header_restore(void *addr)
{
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;
	int ret = 0;

	arch_hdr_invariants(&invariants);

	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = riscv_hartid_to_cpuid(hdr->hartid);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

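	/*
	 * Bring the CPU we hibernated on back online, so that the restore code
	 * runs on the same hart that saved the context.
	 */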
#ifdef CONFIG_SMP
	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}
#endif
	resume_hdr = *hdr;

	return ret;
}
EXPORT_SYMBOL_GPL(arch_hibernation_header_restore);

int swsusp_arch_suspend(void)
{
	int ret = 0;

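	/*
	 * __cpu_suspend_enter() behaves like setjmp(): it returns nonzero on the
	 * initial pass, after saving the CPU context, and zero when execution
	 * resumes here via __hibernate_cpu_resume() once the image has been
	 * restored.
	 */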
	if (__cpu_suspend_enter(hibernate_cpu_context)) {
		sleep_cpu = smp_processor_id();
		suspend_save_csrs(hibernate_cpu_context);
		ret = swsusp_save();
	} else {
		suspend_restore_csrs(hibernate_cpu_context);
		flush_tlb_all();
		flush_icache_all();

		/* Tell the hibernation core that we've just restored the memory. */
		in_suspend = 0;
		sleep_cpu = -EINVAL;
	}

	return ret;
}

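/*
 * The temp_pgtable_map_*() helpers below mirror one level of the kernel page
 * table into the temporary resume page table. Missing intermediate tables are
 * allocated from pages that the hibernation core guarantees are safe to use,
 * i.e. pages that are not part of the saved image.
 */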
static int temp_pgtable_map_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
				unsigned long end, pgprot_t prot)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;

	if (pmd_none(READ_ONCE(*dst_pmdp))) {
		dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_ptep)
			return -ENOMEM;

		pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
	}

	dst_ptep = pte_offset_kernel(dst_pmdp, start);
	src_ptep = pte_offset_kernel(src_pmdp, start);

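	/* Copy each present source PTE, OR-ing in the extra protection bits. */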
	do {
		pte_t pte = READ_ONCE(*src_ptep);

		if (pte_present(pte))
			set_pte(dst_ptep, __pte(pte_val(pte) | pgprot_val(prot)));
	} while (dst_ptep++, src_ptep++, start += PAGE_SIZE, start < end);

	return 0;
}

static int temp_pgtable_map_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
				unsigned long end, pgprot_t prot)
{
	unsigned long next;
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	int ret;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmdp)
			return -ENOMEM;

		pud_populate(NULL, dst_pudp, dst_pmdp);
	}

	dst_pmdp = pmd_offset(dst_pudp, start);
	src_pmdp = pmd_offset(src_pudp, start);

	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(start, end);

		if (pmd_none(pmd))
			continue;

		if (pmd_leaf(pmd)) {
			set_pmd(dst_pmdp, __pmd(pmd_val(pmd) | pgprot_val(prot)));
		} else {
			ret = temp_pgtable_map_pte(dst_pmdp, src_pmdp, start, next, prot);
			if (ret)
				return ret;
		}
	} while (dst_pmdp++, src_pmdp++, start = next, start != end);

	return 0;
}

static int temp_pgtable_map_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
				unsigned long end, pgprot_t prot)
{
	unsigned long next;
	pud_t *dst_pudp;
	pud_t *src_pudp;
	int ret;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pudp)
			return -ENOMEM;

		p4d_populate(NULL, dst_p4dp, dst_pudp);
	}

	dst_pudp = pud_offset(dst_p4dp, start);
	src_pudp = pud_offset(src_p4dp, start);

	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(start, end);

		if (pud_none(pud))
			continue;

		if (pud_leaf(pud)) {
			set_pud(dst_pudp, __pud(pud_val(pud) | pgprot_val(prot)));
		} else {
			ret = temp_pgtable_map_pmd(dst_pudp, src_pudp, start, next, prot);
			if (ret)
				return ret;
		}
	} while (dst_pudp++, src_pudp++, start = next, start != end);

	return 0;
}

static int temp_pgtable_map_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
				unsigned long end, pgprot_t prot)
{
	unsigned long next;
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;
	int ret;

	if (pgd_none(READ_ONCE(*dst_pgdp))) {
		dst_p4dp = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_p4dp)
			return -ENOMEM;

		pgd_populate(NULL, dst_pgdp, dst_p4dp);
	}

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);

	do {
		p4d_t p4d = READ_ONCE(*src_p4dp);

		next = p4d_addr_end(start, end);

		if (p4d_none(p4d))
			continue;

		if (p4d_leaf(p4d)) {
			set_p4d(dst_p4dp, __p4d(p4d_val(p4d) | pgprot_val(prot)));
		} else {
			ret = temp_pgtable_map_pud(dst_p4dp, src_p4dp, start, next, prot);
			if (ret)
				return ret;
		}
	} while (dst_p4dp++, src_p4dp++, start = next, start != end);

	return 0;
}

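/*
 * Duplicate the kernel mappings for [start, end) into pgdp, applying the extra
 * protection bits in prot. Leaf entries at any level are copied directly,
 * while table entries recurse one level down.
 */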
static int temp_pgtable_mapping(pgd_t *pgdp, unsigned long start, unsigned long end, pgprot_t prot)
{
	pgd_t *dst_pgdp = pgd_offset_pgd(pgdp, start);
	pgd_t *src_pgdp = pgd_offset_k(start);
	unsigned long next;
	int ret;

	do {
		pgd_t pgd = READ_ONCE(*src_pgdp);

		next = pgd_addr_end(start, end);

		if (pgd_none(pgd))
			continue;

		if (pgd_leaf(pgd)) {
			set_pgd(dst_pgdp, __pgd(pgd_val(pgd) | pgprot_val(prot)));
		} else {
			ret = temp_pgtable_map_p4d(dst_pgdp, src_pgdp, start, next, prot);
			if (ret)
				return ret;
		}
	} while (dst_pgdp++, src_pgdp++, start = next, start != end);

	return 0;
}

static unsigned long relocate_restore_code(void)
{
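	/*
	 * get_safe_page() returns a page that is not in use by the hibernation
	 * image, so the copy placed here cannot be overwritten while the image
	 * pages are being restored.
	 */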
	void *page = (void *)get_safe_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	copy_page(page, hibernate_core_restore_code);

	/* Make the page containing the relocated code executable. */
	set_memory_x((unsigned long)page, 1);

	return (unsigned long)page;
}

int swsusp_arch_resume(void)
{
	unsigned long end = (unsigned long)pfn_to_virt(max_low_pfn);
	unsigned long start = PAGE_OFFSET;
	int ret;

	/*
	 * Memory allocated by get_safe_page() will be dealt with by the hibernation
	 * core; we don't need to free it here.
	 */
	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	/*
	 * Create a temporary page table and map the whole linear region as executable
	 * and writable.
	 */
	ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE | _PAGE_EXEC));
	if (ret)
		return ret;

	/* Move the restore code to a new page so that it doesn't get overwritten by itself. */
	relocated_restore_code = relocate_restore_code();
	if (relocated_restore_code == -ENOMEM)
		return -ENOMEM;

	/*
	 * Map the __hibernate_cpu_resume() address into the temporary page table so
	 * that the restore code can jump to it once it has finished restoring the
	 * image. That way, the code that runs next doesn't find itself in a different
	 * address space after switching over to the original page table used by the
	 * hibernated image.
	 * This mapping is unnecessary for RV32, where the kernel and linear addresses
	 * are identical, but they differ on RV64. To keep things consistent, we map it
	 * for both RV32 and RV64 kernels.
	 * Additionally, we must ensure that the page is writable before restoring the image.
	 */
	start = (unsigned long)resume_hdr.restore_cpu_addr;
	end = start + PAGE_SIZE;

	ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE));
	if (ret)
		return ret;

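	/*
	 * The second argument is the satp value for the temporary page table: the
	 * PPN of resume_pg_dir with the satp MODE field OR-ed in. On success this
	 * call does not return here; after copying the image pages back, the restore
	 * code switches to the hibernated image's page table (saved_satp) and jumps
	 * to __hibernate_cpu_resume().
	 */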
	hibernate_restore_image(resume_hdr.saved_satp, (PFN_DOWN(__pa(resume_pg_dir)) | satp_mode),
				resume_hdr.restore_cpu_addr);

	return 0;
}

#ifdef CONFIG_PM_SLEEP_SMP
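/*
 * Offline every CPU except the one we hibernated on, so that the image is
 * restored on the same hart that saved the context.
 */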
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}
#endif

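/* Allocate the CPU context save area once, at early boot. */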
static int __init riscv_hibernate_init(void)
{
	hibernate_cpu_context = kzalloc(sizeof(*hibernate_cpu_context), GFP_KERNEL);

	if (WARN_ON(!hibernate_cpu_context))
		return -ENOMEM;

	return 0;
}

early_initcall(riscv_hibernate_init);