// SPDX-License-Identifier: GPL-2.0
/*
 * S390 kdump implementation
 *
 * Copyright IBM Corp. 2011
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/elf.h>
#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#include <asm/sclp.h>

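/* Byte-granular pointer arithmetic helpers used to lay out the ELF core header */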
#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))

static struct memblock_region oldmem_region;

static struct memblock_type oldmem_type = {
	.cnt = 1,
	.max = 1,
	.total_size = 0,
	.regions = &oldmem_region,
	.name = "oldmem",
};

struct save_area {
	struct list_head list;
	u64 psw[2];
	u64 ctrs[16];
	u64 gprs[16];
	u32 acrs[16];
	u64 fprs[16];
	u32 fpc;
	u32 prefix;
	u64 todpreg;
	u64 timer;
	u64 todcmp;
	u64 vxrs_low[16];
	__vector128 vxrs_high[16];
};

static LIST_HEAD(dump_save_areas);

/*
 * Allocate a save area
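 *
 * The boot CPU save area is added at the head of dump_save_areas so that
 * save_area_boot_cpu() can return it via list_first_entry_or_null().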
 */
struct save_area * __init save_area_alloc(bool is_boot_cpu)
{
	struct save_area *sa;

	sa = memblock_alloc(sizeof(*sa), 8);
	if (!sa)
		panic("Failed to allocate save area\n");

	if (is_boot_cpu)
		list_add(&sa->list, &dump_save_areas);
	else
		list_add_tail(&sa->list, &dump_save_areas);
	return sa;
}

/*
 * Return the address of the save area for the boot CPU
 */
struct save_area * __init save_area_boot_cpu(void)
{
	return list_first_entry_or_null(&dump_save_areas, struct save_area, list);
}

/*
 * Copy CPU registers into the save area
 */
void __init save_area_add_regs(struct save_area *sa, void *regs)
{
	struct lowcore *lc;

	lc = (struct lowcore *)(regs - __LC_FPREGS_SAVE_AREA);
	memcpy(&sa->psw, &lc->psw_save_area, sizeof(sa->psw));
	memcpy(&sa->ctrs, &lc->cregs_save_area, sizeof(sa->ctrs));
	memcpy(&sa->gprs, &lc->gpregs_save_area, sizeof(sa->gprs));
	memcpy(&sa->acrs, &lc->access_regs_save_area, sizeof(sa->acrs));
	memcpy(&sa->fprs, &lc->floating_pt_save_area, sizeof(sa->fprs));
	memcpy(&sa->fpc, &lc->fpt_creg_save_area, sizeof(sa->fpc));
	memcpy(&sa->prefix, &lc->prefixreg_save_area, sizeof(sa->prefix));
	memcpy(&sa->todpreg, &lc->tod_progreg_save_area, sizeof(sa->todpreg));
	memcpy(&sa->timer, &lc->cpu_timer_save_area, sizeof(sa->timer));
	memcpy(&sa->todcmp, &lc->clock_comp_save_area, sizeof(sa->todcmp));
}

/*
 * Copy vector registers into the save area
 */
void __init save_area_add_vxrs(struct save_area *sa, __vector128 *vxrs)
{
	int i;

	/*
	 * Copy lower halves of vector registers 0-15: u[2] addresses the
	 * low-order doubleword of each 16-byte vector register.
	 */
	for (i = 0; i < 16; i++)
		memcpy(&sa->vxrs_low[i], &vxrs[i].u[2], 8);
	/* Copy vector registers 16-31 */
	memcpy(sa->vxrs_high, vxrs + 16, 16 * sizeof(__vector128));
}

/*
 * Return physical address for virtual address
 */
static inline void *load_real_addr(void *addr)
{
	unsigned long real_addr;

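	/* LRA sets cc 0 when the translation succeeded; otherwise return NULL */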
	asm volatile(
		"	lra	%0,0(%1)\n"
		"	jz	0f\n"
		"	la	%0,0\n"
		"0:"
		: "=a" (real_addr) : "a" (addr) : "cc");
	return (void *)real_addr;
}

/*
 * Copy memory of the old, dumped system to a kernel space virtual address
 */
int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
{
	unsigned long len;
	void *ra;
	int rc;

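	/*
	 * Reads below sclp.hsa_size are served from the zfcp/nvme dump
	 * HSA (hardware system area); all other reads go through
	 * memcpy_real(), with the kdump oldmem swap applied: accesses to
	 * [0, OLDMEM_SIZE) and [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE)
	 * are exchanged.
	 */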
	while (count) {
		if (!oldmem_data.start && src < sclp.hsa_size) {
			/* Copy from zfcp/nvme dump HSA area */
			len = min(count, sclp.hsa_size - src);
			rc = memcpy_hsa_kernel(dst, src, len);
			if (rc)
				return rc;
		} else {
			/* Check for swapped kdump oldmem areas */
			if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
				src -= oldmem_data.start;
				len = min(count, oldmem_data.size - src);
			} else if (oldmem_data.start && src < oldmem_data.size) {
				len = min(count, oldmem_data.size - src);
				src += oldmem_data.start;
			} else {
				len = count;
			}
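			/*
			 * vmalloc memory is not physically contiguous, so
			 * translate per page and cap the copy at a page.
			 */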
			if (is_vmalloc_or_module_addr(dst)) {
				ra = load_real_addr(dst);
				len = min(PAGE_SIZE - offset_in_page(ra), len);
			} else {
				ra = dst;
			}
			if (memcpy_real(ra, src, len))
				return -EFAULT;
		}
		dst += len;
		src += len;
		count -= len;
	}
	return 0;
}

/*
 * Copy memory of the old, dumped system to a user space virtual address
 */
static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
{
	unsigned long len;
	int rc;

	while (count) {
		if (!oldmem_data.start && src < sclp.hsa_size) {
			/* Copy from zfcp/nvme dump HSA area */
			len = min(count, sclp.hsa_size - src);
			rc = memcpy_hsa_user(dst, src, len);
			if (rc)
				return rc;
		} else {
			/* Check for swapped kdump oldmem areas */
			if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
				src -= oldmem_data.start;
				len = min(count, oldmem_data.size - src);
			} else if (oldmem_data.start && src < oldmem_data.size) {
				len = min(count, oldmem_data.size - src);
				src += oldmem_data.start;
			} else {
				len = count;
			}
			rc = copy_to_user_real(dst, src, len);
			if (rc)
				return rc;
		}
		dst += len;
		src += len;
		count -= len;
	}
	return 0;
}

/*
 * Copy one page from "oldmem"
 */
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
			 unsigned long offset)
{
	unsigned long src;
	int rc;

	if (!(iter_is_iovec(iter) || iov_iter_is_kvec(iter)))
		return -EINVAL;
	/* Multi-segment iterators are not supported */
	if (iter->nr_segs > 1)
		return -EINVAL;
	if (!csize)
		return 0;
	src = pfn_to_phys(pfn) + offset;

	/* XXX: pass the iov_iter down to a common function */
	if (iter_is_iovec(iter))
		rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
	else
		rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
	if (rc < 0)
		return rc;
	iov_iter_advance(iter, csize);
	return csize;
}

/*
 * Remap "oldmem" for kdump
 *
 * For the kdump reserved memory this function performs a swap operation:
 * [0, OLDMEM_SIZE] is mapped to [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE]
 */
static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
					unsigned long from, unsigned long pfn,
					unsigned long size, pgprot_t prot)
{
	unsigned long size_old;
	int rc;

	if (pfn < oldmem_data.size >> PAGE_SHIFT) {
		size_old = min(size, oldmem_data.size - (pfn << PAGE_SHIFT));
		rc = remap_pfn_range(vma, from,
				     pfn + (oldmem_data.start >> PAGE_SHIFT),
				     size_old, prot);
		if (rc || size == size_old)
			return rc;
		size -= size_old;
		from += size_old;
		pfn += size_old >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for zfcp/nvme dump
 *
 * We only map available memory above HSA size. Memory below HSA size
 * is read on demand using the copy_oldmem_page() function.
 */
static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
					   unsigned long from,
					   unsigned long pfn,
					   unsigned long size, pgprot_t prot)
{
	unsigned long hsa_end = sclp.hsa_size;
	unsigned long size_hsa;

	if (pfn < hsa_end >> PAGE_SHIFT) {
		size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
		if (size == size_hsa)
			return 0;
		size -= size_hsa;
		from += size_hsa;
		pfn += size_hsa >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for kdump or zfcp/nvme dump
 */
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (oldmem_data.start)
		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
						       prot);
}

static const char *nt_name(Elf64_Word type)
{
	const char *name = "LINUX";

	if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
		name = KEXEC_CORE_NOTE_NAME;
	return name;
}

/*
 * Initialize ELF note
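 *
 * A note is an Elf64_Nhdr followed by the name and the descriptor,
 * each padded to a four byte boundary.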
 */
static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
			  const char *name)
{
	Elf64_Nhdr *note;
	u64 len;

	note = (Elf64_Nhdr *)buf;
	note->n_namesz = strlen(name) + 1;
	note->n_descsz = d_len;
	note->n_type = type;
	len = sizeof(Elf64_Nhdr);

	memcpy(buf + len, name, note->n_namesz);
	len = roundup(len + note->n_namesz, 4);

	memcpy(buf + len, desc, note->n_descsz);
	len = roundup(len + note->n_descsz, 4);

	return PTR_ADD(buf, len);
}

static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
{
	return nt_init_name(buf, type, desc, d_len, nt_name(type));
}

/*
 * Calculate the size of ELF note
 */
static size_t nt_size_name(int d_len, const char *name)
{
	size_t size;

	size = sizeof(Elf64_Nhdr);
	size += roundup(strlen(name) + 1, 4);
	size += roundup(d_len, 4);

	return size;
}

static inline size_t nt_size(Elf64_Word type, int d_len)
{
	return nt_size_name(d_len, nt_name(type));
}

/*
 * Fill ELF notes for one CPU with save area registers
 */
static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
{
	struct elf_prstatus nt_prstatus;
	elf_fpregset_t nt_fpregset;

	/* Prepare prstatus note */
	memset(&nt_prstatus, 0, sizeof(nt_prstatus));
	memcpy(&nt_prstatus.pr_reg.gprs, sa->gprs, sizeof(sa->gprs));
	memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
	memcpy(&nt_prstatus.pr_reg.acrs, sa->acrs, sizeof(sa->acrs));
	nt_prstatus.common.pr_pid = cpu;
	/* Prepare fpregset (floating point) note */
	memset(&nt_fpregset, 0, sizeof(nt_fpregset));
	memcpy(&nt_fpregset.fpc, &sa->fpc, sizeof(sa->fpc));
	memcpy(&nt_fpregset.fprs, &sa->fprs, sizeof(sa->fprs));
	/* Create ELF notes for the CPU */
	ptr = nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus));
	ptr = nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset));
	ptr = nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer));
	ptr = nt_init(ptr, NT_S390_TODCMP, &sa->todcmp, sizeof(sa->todcmp));
	ptr = nt_init(ptr, NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg));
	ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs));
	ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix));
	if (MACHINE_HAS_VX) {
		ptr = nt_init(ptr, NT_S390_VXRS_HIGH,
			      &sa->vxrs_high, sizeof(sa->vxrs_high));
		ptr = nt_init(ptr, NT_S390_VXRS_LOW,
			      &sa->vxrs_low, sizeof(sa->vxrs_low));
	}
	return ptr;
}

/*
 * Calculate size of ELF notes per cpu
 */
static size_t get_cpu_elf_notes_size(void)
{
	struct save_area *sa = NULL;
	size_t size;

	size = nt_size(NT_PRSTATUS, sizeof(struct elf_prstatus));
	size += nt_size(NT_PRFPREG, sizeof(elf_fpregset_t));
	size += nt_size(NT_S390_TIMER, sizeof(sa->timer));
	size += nt_size(NT_S390_TODCMP, sizeof(sa->todcmp));
	size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
	size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
	size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
	if (MACHINE_HAS_VX) {
		size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
		size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
	}

	return size;
}

/*
 * Initialize prpsinfo note (new kernel)
 */
static void *nt_prpsinfo(void *ptr)
{
	struct elf_prpsinfo prpsinfo;

	memset(&prpsinfo, 0, sizeof(prpsinfo));
	prpsinfo.pr_sname = 'R';
	strcpy(prpsinfo.pr_fname, "vmlinux");
	return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo));
}

/*
 * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
 */
static void *get_vmcoreinfo_old(unsigned long *size)
{
	char nt_name[11], *vmcoreinfo;
	unsigned long addr;
	Elf64_Nhdr note;

	if (copy_oldmem_kernel(&addr, __LC_VMCORE_INFO, sizeof(addr)))
		return NULL;
	memset(nt_name, 0, sizeof(nt_name));
	if (copy_oldmem_kernel(&note, addr, sizeof(note)))
		return NULL;
	if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
			       sizeof(nt_name) - 1))
		return NULL;
	if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
		return NULL;
	vmcoreinfo = kzalloc(note.n_descsz, GFP_KERNEL);
	if (!vmcoreinfo)
		return NULL;
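	/* desc starts at sizeof(Elf64_Nhdr) + roundup(n_namesz, 4) == 12 + 12 */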
	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
		kfree(vmcoreinfo);
		return NULL;
	}
	*size = note.n_descsz;
	return vmcoreinfo;
}

/*
 * Initialize vmcoreinfo note (new kernel)
 */
static void *nt_vmcoreinfo(void *ptr)
{
	const char *name = VMCOREINFO_NOTE_NAME;
	unsigned long size;
	void *vmcoreinfo;

	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
	if (vmcoreinfo)
		return nt_init_name(ptr, 0, vmcoreinfo, size, name);

	vmcoreinfo = get_vmcoreinfo_old(&size);
	if (!vmcoreinfo)
		return ptr;
	ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
	kfree(vmcoreinfo);
	return ptr;
}

static size_t nt_vmcoreinfo_size(void)
{
	const char *name = VMCOREINFO_NOTE_NAME;
	unsigned long size;
	void *vmcoreinfo;

	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
	if (vmcoreinfo)
		return nt_size_name(size, name);

	vmcoreinfo = get_vmcoreinfo_old(&size);
	if (!vmcoreinfo)
		return 0;

	kfree(vmcoreinfo);
	return nt_size_name(size, name);
}

/*
 * Initialize final note (needed for /proc/vmcore code)
 */
static void *nt_final(void *ptr)
{
	Elf64_Nhdr *note;

	note = (Elf64_Nhdr *) ptr;
	note->n_namesz = 0;
	note->n_descsz = 0;
	note->n_type = 0;
	return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
}

/*
 * Initialize ELF header (new kernel)
 */
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
	memset(ehdr, 0, sizeof(*ehdr));
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = EM_S390;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);
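	/* One PT_NOTE program header plus one PT_LOAD per memory chunk */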
	ehdr->e_phnum = mem_chunk_cnt + 1;
	return ehdr + 1;
}

/*
 * Return CPU count for ELF header (new kernel)
 */
static int get_cpu_cnt(void)
{
	struct save_area *sa;
	int cpus = 0;

	list_for_each_entry(sa, &dump_save_areas, list)
		if (sa->prefix != 0)
			cpus++;
	return cpus;
}

/*
 * Return memory chunk count for ELF header (new kernel)
 */
static int get_mem_chunk_cnt(void)
{
	int cnt = 0;
	u64 idx;

	for_each_physmem_range(idx, &oldmem_type, NULL, NULL)
		cnt++;
	return cnt;
}

/*
 * Initialize ELF loads (new kernel)
 */
static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
	phys_addr_t start, end;
	u64 idx;

	for_each_physmem_range(idx, &oldmem_type, &start, &end) {
		phdr->p_filesz = end - start;
		phdr->p_type = PT_LOAD;
		phdr->p_offset = start;
		phdr->p_vaddr = start;
		phdr->p_paddr = start;
		phdr->p_memsz = end - start;
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_align = PAGE_SIZE;
		phdr++;
	}
}

/*
 * Initialize notes (new kernel)
 */
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
	struct save_area *sa;
	void *ptr_start = ptr;
	int cpu;

	ptr = nt_prpsinfo(ptr);

	cpu = 1;
	list_for_each_entry(sa, &dump_save_areas, list)
		if (sa->prefix != 0)
			ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
	ptr = nt_vmcoreinfo(ptr);
	ptr = nt_final(ptr);
	memset(phdr, 0, sizeof(*phdr));
	phdr->p_type = PT_NOTE;
	phdr->p_offset = notes_offset;
	phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
	phdr->p_memsz = phdr->p_filesz;
	return ptr;
}

static size_t get_elfcorehdr_size(int mem_chunk_cnt)
{
	size_t size;

	size = sizeof(Elf64_Ehdr);
	/* PT_NOTES */
	size += sizeof(Elf64_Phdr);
	/* nt_prpsinfo */
	size += nt_size(NT_PRPSINFO, sizeof(struct elf_prpsinfo));
	/* regsets */
	size += get_cpu_cnt() * get_cpu_elf_notes_size();
	/* nt_vmcoreinfo */
	size += nt_vmcoreinfo_size();
	/* nt_final */
	size += sizeof(Elf64_Nhdr);
	/* PT_LOADS */
	size += mem_chunk_cnt * sizeof(Elf64_Phdr);

	return size;
}

/*
 * Create ELF core header (new kernel)
 */
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	Elf64_Phdr *phdr_notes, *phdr_loads;
	int mem_chunk_cnt;
	void *ptr, *hdr;
	u32 alloc_size;
	u64 hdr_off;

	/* If we are not in kdump or zfcp/nvme dump mode return */
	if (!oldmem_data.start && !is_ipl_type_dump())
		return 0;
	/* If we cannot get HSA size for zfcp/nvme dump return error */
	if (is_ipl_type_dump() && !sclp.hsa_size)
		return -ENODEV;

	/* For kdump, exclude previous crashkernel memory */
	if (oldmem_data.start) {
		oldmem_region.base = oldmem_data.start;
		oldmem_region.size = oldmem_data.size;
		oldmem_type.total_size = oldmem_data.size;
	}

	mem_chunk_cnt = get_mem_chunk_cnt();

	alloc_size = get_elfcorehdr_size(mem_chunk_cnt);

	hdr = kzalloc(alloc_size, GFP_KERNEL);

	/*
	 * Without elfcorehdr /proc/vmcore cannot be created. Thus creating
	 * a dump with this crash kernel will fail. Panic now to allow other
	 * dump mechanisms to take over.
	 */
	if (!hdr)
		panic("s390 kdump allocating elfcorehdr failed");

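	/*
	 * Buffer layout: Elf64_Ehdr, one PT_NOTE program header, one
	 * PT_LOAD program header per memory chunk, then the note data.
	 * The PT_LOAD contents stay in old memory; p_offset holds the
	 * physical start address of each chunk.
	 */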
	/* Init elf header */
	ptr = ehdr_init(hdr, mem_chunk_cnt);
	/* Init program headers */
	phdr_notes = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
	phdr_loads = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
	/* Init notes */
	hdr_off = PTR_DIFF(ptr, hdr);
	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
	/* Init loads */
	hdr_off = PTR_DIFF(ptr, hdr);
	loads_init(phdr_loads, hdr_off);
	*addr = (unsigned long long) hdr;
	*size = (unsigned long long) hdr_off;
	BUG_ON(elfcorehdr_size > alloc_size);
	return 0;
}

/*
 * Free ELF core header (new kernel)
 */
void elfcorehdr_free(unsigned long long addr)
{
	kfree((void *)(unsigned long)addr);
}

/*
 * Read from ELF header
 */
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	void *src = (void *)(unsigned long)*ppos;

	memcpy(buf, src, count);
	*ppos += count;
	return count;
}

/*
 * Read from ELF notes data
 */
ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	void *src = (void *)(unsigned long)*ppos;

	memcpy(buf, src, count);
	*ppos += count;
	return count;
}