// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

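/*
 * Temporary pgdir that keeps the early shadow reachable while the kasan
 * entries in swapper_pg_dir are torn down and repopulated; see the comment
 * in kasan_init_shadow() below.
 */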
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

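/* Allocate one zero-initialized shadow page from memblock and return its physical address. */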
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_NOLEAKTRACE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

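/*
 * As above, but the page contents are left uninitialized; the caller fills
 * the page itself (kasan_pte_populate() writes KASAN_SHADOW_INIT).
 */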
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_NOLEAKTRACE,
					     node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

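/*
 * The kasan_p*_offset() helpers below each descend one page-table level,
 * allocating the next-level table on demand: the statically allocated
 * kasan_early_shadow_* tables when "early" (installed via __pa_symbol, per
 * the comment above), a fresh memblock page otherwise.
 */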
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
				: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
				: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
				: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

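/*
 * Map shadow pages at the PTE level. Early mappings alias every address to
 * the single kasan_early_shadow_page; late mappings get freshly allocated
 * pages pre-filled with KASAN_SHADOW_INIT.
 */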
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
				: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

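/* Populate shadow for [addr, end), walking from the top table level down. */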
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used here because
	 * it's a no-op on 2- and 3-level pagetable setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

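/*
 * Replace the early shadow with the real one: switch TTBR1 to tmp_pg_dir so
 * the early shadow stays mapped, clear the kasan pgds from swapper_pg_dir,
 * populate real shadow for the kernel image and all memory ranges, point
 * everything else at the (now read-only) zero shadow page, then switch back
 * to swapper_pg_dir.
 */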
static void __init kasan_init_shadow(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	u64 vmalloc_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				    (void *)mod_shadow_start);

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		BUILD_BUG_ON(VMALLOC_START != MODULES_END);
		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
					    (void *)KASAN_SHADOW_END);
	} else {
		kasan_populate_early_shadow((void *)kimg_shadow_end,
					    (void *)KASAN_SHADOW_END);
		if (kimg_shadow_start > mod_shadow_end)
			kasan_populate_early_shadow((void *)mod_shadow_end,
						    (void *)kimg_shadow_start);
	}

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
}

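/*
 * init_task starts with a non-zero kasan_depth so that reports are
 * suppressed during early boot; clearing it here turns checking on.
 */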
static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}

#ifdef CONFIG_KASAN_VMALLOC
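/*
 * Eagerly populate real shadow for vm areas that are created before the
 * generic vmalloc shadow handling is available (e.g. early per-cpu areas
 * registered via vm_area_register_early()).
 */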
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	unsigned long shadow_start, shadow_end;

	if (!is_vmalloc_or_module_addr(start))
		return;

	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
	kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
}
#endif

void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
	pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */