// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/mmzone.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right
 * colour when needed. Since the pages are never written to after
 * initialization, we don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

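/*
 * Allocate the empty zero page(s), mark them reserved so they are
 * never freed, and compute zero_page_mask, which is used to pick a
 * zero page of the right colour. With order 0 a single page is used.
 */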
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

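/*
 * Copy a user page through temporary kernel mappings (kmap_atomic),
 * then order the stores with smp_wmb() so other CPUs observe the
 * copied data before the page is handed out.
 */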
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	vfrom = kmap_atomic(from);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

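/* A PFN is usable RAM if memblock has it as memory and it is not reserved. */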
int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

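/*
 * Record the highest PFN usable by each zone (DMA/DMA32 where
 * configured, NORMAL up to max_low_pfn) and let the core mm
 * initialize the zones and free lists from that.
 */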
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

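/*
 * Hand all memblock-managed memory over to the buddy allocator, then
 * set up the zero pages once the allocator is live.
 */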
void __init mem_init(void)
{
	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */
}

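/* Free (and poison) the memory occupied by the __init sections. */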
void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_MEMORY_HOTPLUG
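/* Hot-add a memory range: create the memmap for [start, start + size). */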
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);

	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
				__func__, ret);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
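/* Tear down the memmap created by arch_add_memory(). */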
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
#endif

/*
 * Align swapper_pg_dir in .bss to 64K, which allows its address to be
 * loaded with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment size
 * and waste space. So we place it in its own section and align it
 * there in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

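/*
 * Statically-allocated "invalid" tables: entries that map nothing
 * point at the invalid table one level down, so that page-table walks
 * of unmapped addresses always terminate at a valid (empty) table.
 */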
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);