// SPDX-License-Identifier: GPL-2.0-only
/*
 * recordmcount.c: construct a table of the locations of calls to 'mcount'
 * so that ftrace can find them quickly.
 * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>.  All rights reserved.
 *
 * Restructured to fit Linux format, as well as other updates:
 * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the .o file in-place.
 *
 * Append a new STRTAB that has the new section names, followed by a new array
 * ElfXX_Shdr[] that has the new section headers, followed by the section
 * contents for __mcount_loc and its relocations.  The old shstrtab strings,
 * and the old ElfXX_Shdr[] array, remain as "garbage" (commonly, a couple
 * kilobytes.)  Subsequent processing by /bin/ld (or the kernel module loader)
 * will ignore the garbage regions, because they are not designated by the
 * new .e_shoff nor the new ElfXX_Shdr[].  [In order to remove the garbage,
 * then use "ld -r" to create a new file that omits the garbage.]
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifndef EM_AARCH64
#define EM_AARCH64	183
#define R_AARCH64_NONE		0
#define R_AARCH64_ABS64	257
#endif

#ifndef EM_LOONGARCH
#define EM_LOONGARCH	258
#define R_LARCH_32		1
#define R_LARCH_64		2
#define R_LARCH_MARK_LA		20
#define R_LARCH_SOP_PUSH_PLT_PCREL	29
#endif

#define R_ARM_PC24		1
#define R_ARM_THM_CALL		10
#define R_ARM_CALL		28

#define R_AARCH64_CALL26	283

static int fd_map;	/* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static char gpfx;	/* prefix for global symbol name (sometimes '_') */
static struct stat sb;	/* Remember .st_size, etc. */
static const char *altmcount;	/* alternate mcount symbol name */
static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */
static void *file_map;	/* pointer of the mapped file */
static void *file_end;	/* pointer to the end of the mapped file */
static int file_updated; /* flag to state file was changed */
static void *file_ptr;	/* current file pointer location */

static void *file_append; /* added to the end of the file */
static size_t file_append_size; /* how much is added to end of file */

/* Per-file resource cleanup when multiple files. */
static void file_append_cleanup(void)
{
	free(file_append);
	file_append = NULL;
	file_append_size = 0;
	file_updated = 0;
}

static void mmap_cleanup(void)
{
	if (!mmap_failed)
		munmap(file_map, sb.st_size);
	else
		free(file_map);
	file_map = NULL;
}

/* ulseek, uwrite, ...:  Check return value for errors. */

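/*
 * ulseek() never touches the file itself: it only moves file_ptr within
 * the in-memory image set up by mmap_file(), and reports an error if the
 * caller tries to seek before the start of that image.
 */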
static off_t ulseek(off_t const offset, int const whence)
{
	switch (whence) {
	case SEEK_SET:
		file_ptr = file_map + offset;
		break;
	case SEEK_CUR:
		file_ptr += offset;
		break;
	case SEEK_END:
		file_ptr = file_map + (sb.st_size - offset);
		break;
	}
	if (file_ptr < file_map) {
		fprintf(stderr, "lseek: seek before file\n");
		return -1;
	}
	return file_ptr - file_map;
}

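/*
 * uwrite() is the in-memory counterpart of write(): data that fits inside
 * the mapped image is copied in place, and anything that would land past
 * file_end is spilled into the (re)allocated file_append buffer.
 * write_file() later writes the image followed by that append buffer, so
 * callers may "write" past the original st_size.
 */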
static ssize_t uwrite(void const *const buf, size_t const count)
{
	size_t cnt = count;
	off_t idx = 0;
	void *p = NULL;

	file_updated = 1;

	if (file_ptr + count >= file_end) {
		off_t aoffset = (file_ptr + count) - file_end;

		if (aoffset > file_append_size) {
			p = realloc(file_append, aoffset);
			if (!p)
				free(file_append);
			file_append = p;
			file_append_size = aoffset;
		}
		if (!file_append) {
			perror("write");
			file_append_cleanup();
			mmap_cleanup();
			return -1;
		}
		if (file_ptr < file_end) {
			cnt = file_end - file_ptr;
		} else {
			cnt = 0;
			idx = aoffset - count;
		}
	}

	if (cnt)
		memcpy(file_ptr, buf, cnt);

	if (cnt < count)
		memcpy(file_append + idx, buf + cnt, count - cnt);

	file_ptr += count;
	return count;
}

static void * umalloc(size_t size)
{
	void *const addr = malloc(size);
	if (addr == 0) {
		fprintf(stderr, "malloc failed: %zu bytes\n", size);
		file_append_cleanup();
		mmap_cleanup();
		return NULL;
	}
	return addr;
}
162
Matt Helsley4fbcf072019-07-31 11:24:16 -0700163/*
164 * Get the whole file as a programming convenience in order to avoid
165 * malloc+lseek+read+free of many pieces. If successful, then mmap
166 * avoids copying unused pieces; else just read the whole file.
167 * Open for both read and write; new info will be appended to the file.
168 * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
169 * do not propagate to the file until an explicit overwrite at the last.
170 * This preserves most aspects of consistency (all except .st_size)
171 * for simultaneous readers of the file while we are appending to it.
172 * However, multiple writers still are bad. We choose not to use
173 * locking because it is expensive and the use case of kernel build
174 * makes multiple writers unlikely.
175 */
176static void *mmap_file(char const *fname)
177{
178 /* Avoid problems if early cleanup() */
179 fd_map = -1;
180 mmap_failed = 1;
181 file_map = NULL;
182 file_ptr = NULL;
183 file_updated = 0;
184 sb.st_size = 0;
185
186 fd_map = open(fname, O_RDONLY);
187 if (fd_map < 0) {
188 perror(fname);
189 return NULL;
190 }
191 if (fstat(fd_map, &sb) < 0) {
192 perror(fname);
193 goto out;
194 }
195 if (!S_ISREG(sb.st_mode)) {
196 fprintf(stderr, "not a regular file: %s\n", fname);
197 goto out;
198 }
199 file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
200 fd_map, 0);
201 if (file_map == MAP_FAILED) {
202 mmap_failed = 1;
203 file_map = umalloc(sb.st_size);
204 if (!file_map) {
205 perror(fname);
206 goto out;
207 }
208 if (read(fd_map, file_map, sb.st_size) != sb.st_size) {
209 perror(fname);
210 free(file_map);
211 file_map = NULL;
212 goto out;
213 }
214 } else
215 mmap_failed = 0;
216out:
217 close(fd_map);
218 fd_map = -1;
219
220 file_end = file_map + sb.st_size;
221
222 return file_map;
223}
224
225
Steven Rostedtffd618f2011-04-08 03:58:48 -0400226static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
227static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
228static unsigned char *ideal_nop;
229
230static char rel_type_nop;
231
232static int (*make_nop)(void *map, size_t const offset);
233
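/*
 * On x86 the compiler emits the mcount call as a 5-byte "call": 0xe8
 * followed by a 32-bit offset that is still zero in the unlinked object
 * (the relocation fills it in later).  make_nop_x86() verifies exactly
 * that byte pattern at the call site and overwrites it with the matching
 * 5-byte ideal_nop selected in do_file().
 */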
static int make_nop_x86(void *map, size_t const offset)
{
	uint32_t *ptr;
	unsigned char *op;

	/* Confirm we have 0xe8 0x0 0x0 0x0 0x0 */
	ptr = map + offset;
	if (*ptr != 0)
		return -1;

	op = map + offset - 1;
	if (*op != 0xe8)
		return -1;

	/* convert to nop */
	if (ulseek(offset - 1, SEEK_SET) < 0)
		return -1;
	if (uwrite(ideal_nop, 5) < 0)
		return -1;
	return 0;
}

static unsigned char ideal_nop4_arm_le[4] = { 0x00, 0x00, 0xa0, 0xe1 }; /* mov r0, r0 */
static unsigned char ideal_nop4_arm_be[4] = { 0xe1, 0xa0, 0x00, 0x00 }; /* mov r0, r0 */
static unsigned char *ideal_nop4_arm;

static unsigned char bl_mcount_arm_le[4] = { 0xfe, 0xff, 0xff, 0xeb }; /* bl */
static unsigned char bl_mcount_arm_be[4] = { 0xeb, 0xff, 0xff, 0xfe }; /* bl */
static unsigned char *bl_mcount_arm;

static unsigned char push_arm_le[4] = { 0x04, 0xe0, 0x2d, 0xe5 }; /* push {lr} */
static unsigned char push_arm_be[4] = { 0xe5, 0x2d, 0xe0, 0x04 }; /* push {lr} */
static unsigned char *push_arm;

static unsigned char ideal_nop2_thumb_le[2] = { 0x00, 0xbf }; /* nop */
static unsigned char ideal_nop2_thumb_be[2] = { 0xbf, 0x00 }; /* nop */
static unsigned char *ideal_nop2_thumb;

static unsigned char push_bl_mcount_thumb_le[6] = { 0x00, 0xb5, 0xff, 0xf7, 0xfe, 0xff }; /* push {lr}, bl */
static unsigned char push_bl_mcount_thumb_be[6] = { 0xb5, 0x00, 0xf7, 0xff, 0xff, 0xfe }; /* push {lr}, bl */
static unsigned char *push_bl_mcount_thumb;

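/*
 * ARM call sites come in two flavours, matched against the byte patterns
 * above:
 *  - ARM mode:   a "bl mcount", optionally preceded by "push {lr}"; each
 *                matched instruction is replaced with a 4-byte nop.
 *  - Thumb mode: the 6-byte "push {lr}; bl mcount" pair is replaced with
 *                three 2-byte nops.
 * Anything else is left untouched and reported as a failure.
 */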
static int make_nop_arm(void *map, size_t const offset)
{
	char *ptr;
	int cnt = 1;
	int nop_size;
	size_t off = offset;

	ptr = map + offset;
	if (memcmp(ptr, bl_mcount_arm, 4) == 0) {
		if (memcmp(ptr - 4, push_arm, 4) == 0) {
			off -= 4;
			cnt = 2;
		}
		ideal_nop = ideal_nop4_arm;
		nop_size = 4;
	} else if (memcmp(ptr - 2, push_bl_mcount_thumb, 6) == 0) {
		cnt = 3;
		nop_size = 2;
		off -= 2;
		ideal_nop = ideal_nop2_thumb;
	} else
		return -1;

	/* Convert to nop */
	if (ulseek(off, SEEK_SET) < 0)
		return -1;

	do {
		if (uwrite(ideal_nop, nop_size) < 0)
			return -1;
	} while (--cnt > 0);

	return 0;
}

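/*
 * On arm64 the unrelocated "bl <_mcount>" is always 0x94000000 (bl with a
 * zero immediate); it is replaced with the single "nop" instruction
 * 0xd503201f, stored below in little-endian byte order.
 */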
static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
static int make_nop_arm64(void *map, size_t const offset)
{
	uint32_t *ptr;

	ptr = map + offset;
	/* bl <_mcount> is 0x94000000 before relocation */
	if (*ptr != 0x94000000)
		return -1;

	/* Convert to nop */
	if (ulseek(offset, SEEK_SET) < 0)
		return -1;
	if (uwrite(ideal_nop, 4) < 0)
		return -1;
	return 0;
}

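/*
 * Flush the result: if anything was changed, write the (modified)
 * in-memory image plus the append buffer to a temporary "<fname>.rc"
 * file and then rename it over the original object file.
 */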
static int write_file(const char *fname)
{
	char tmp_file[strlen(fname) + 4];
	size_t n;

	if (!file_updated)
		return 0;

	sprintf(tmp_file, "%s.rc", fname);

	/*
	 * After reading the entire file into memory, delete it
	 * and write it back, to prevent weird side effects of modifying
	 * an object file in place.
	 */
	fd_map = open(tmp_file, O_WRONLY | O_TRUNC | O_CREAT, sb.st_mode);
	if (fd_map < 0) {
		perror(fname);
		return -1;
	}
	n = write(fd_map, file_map, sb.st_size);
	if (n != sb.st_size) {
		perror("write");
		close(fd_map);
		return -1;
	}
	if (file_append_size) {
		n = write(fd_map, file_append, file_append_size);
		if (n != file_append_size) {
			perror("write");
			close(fd_map);
			return -1;
		}
	}
	close(fd_map);
	if (rename(tmp_file, fname) < 0) {
		perror(fname);
		return -1;
	}
	return 0;
}

/* w8rev, w8nat, ...: Handle endianness. */

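/*
 * The w8/w/w2 accessors convert fields between file byte order and host
 * byte order.  The *rev variants byte-swap, e.g.
 * w4rev(0x12345678) == 0x78563412; the *nat variants are identity
 * functions used when file and host endianness already match.
 */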
static uint64_t w8rev(uint64_t const x)
{
	return   ((0xff & (x >> (0 * 8))) << (7 * 8))
	       | ((0xff & (x >> (1 * 8))) << (6 * 8))
	       | ((0xff & (x >> (2 * 8))) << (5 * 8))
	       | ((0xff & (x >> (3 * 8))) << (4 * 8))
	       | ((0xff & (x >> (4 * 8))) << (3 * 8))
	       | ((0xff & (x >> (5 * 8))) << (2 * 8))
	       | ((0xff & (x >> (6 * 8))) << (1 * 8))
	       | ((0xff & (x >> (7 * 8))) << (0 * 8));
}

static uint32_t w4rev(uint32_t const x)
{
	return   ((0xff & (x >> (0 * 8))) << (3 * 8))
	       | ((0xff & (x >> (1 * 8))) << (2 * 8))
	       | ((0xff & (x >> (2 * 8))) << (1 * 8))
	       | ((0xff & (x >> (3 * 8))) << (0 * 8));
}

static uint32_t w2rev(uint16_t const x)
{
	return   ((0xff & (x >> (0 * 8))) << (1 * 8))
	       | ((0xff & (x >> (1 * 8))) << (0 * 8));
}

static uint64_t w8nat(uint64_t const x)
{
	return x;
}

static uint32_t w4nat(uint32_t const x)
{
	return x;
}

static uint32_t w2nat(uint16_t const x)
{
	return x;
}

static uint64_t (*w8)(uint64_t);
static uint32_t (*w)(uint32_t);
static uint32_t (*w2)(uint16_t);

/* Names of the sections that could contain calls to mcount. */
static int is_mcounted_section_name(char const *const txtname)
{
	return strncmp(".text",          txtname, 5) == 0 ||
		strcmp(".init.text",     txtname) == 0 ||
		strcmp(".ref.text",      txtname) == 0 ||
		strcmp(".sched.text",    txtname) == 0 ||
		strcmp(".spinlock.text", txtname) == 0 ||
		strcmp(".irqentry.text", txtname) == 0 ||
		strcmp(".softirqentry.text", txtname) == 0 ||
		strcmp(".kprobes.text", txtname) == 0 ||
		strcmp(".cpuidle.text", txtname) == 0;
}

static char const *already_has_rel_mcount = "success"; /* our work here is done! */

/* 32 bit and 64 bit are very similar */
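/*
 * recordmcount.h is included twice: once as-is for the ELFCLASS32 case
 * and once with RECORD_MCOUNT_64 defined for the ELFCLASS64 case,
 * producing the width-specific do32()/do64() handlers (and the
 * is_fake_mcount32/64, mcount_adjust_32/64 hooks) used by do_file() below.
 */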
#include "recordmcount.h"
#define RECORD_MCOUNT_64
#include "recordmcount.h"

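/*
 * A "fake" mcount relocation references the mcount symbol without being a
 * patchable call site, so it must not be recorded in __mcount_loc.  On ARM
 * only the direct call relocation types (R_ARM_PC24, R_ARM_CALL,
 * R_ARM_THM_CALL) are treated as real call sites.
 */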
static int arm_is_fake_mcount(Elf32_Rel const *rp)
{
	switch (ELF32_R_TYPE(w(rp->r_info))) {
	case R_ARM_THM_CALL:
	case R_ARM_CALL:
	case R_ARM_PC24:
		return 0;
	}

	return 1;
}

static int arm64_is_fake_mcount(Elf64_Rel const *rp)
{
	return ELF64_R_TYPE(w8(rp->r_info)) != R_AARCH64_CALL26;
}

static int LARCH32_is_fake_mcount(Elf32_Rel const *rp)
{
	switch (ELF64_R_TYPE(w(rp->r_info))) {
	case R_LARCH_MARK_LA:
	case R_LARCH_SOP_PUSH_PLT_PCREL:
		return 0;
	}

	return 1;
}

static int LARCH64_is_fake_mcount(Elf64_Rel const *rp)
{
	switch (ELF64_R_TYPE(w(rp->r_info))) {
	case R_LARCH_MARK_LA:
	case R_LARCH_SOP_PUSH_PLT_PCREL:
		return 0;
	}

	return 1;
}

/* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
 * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
 * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
 * to imply the order of the members; the spec does not say so.
 *	typedef unsigned char Elf64_Byte;
 * fails on MIPS64 because their <elf.h> already has it!
 */

typedef uint8_t myElf64_Byte;		/* Type for a 8-bit quantity.  */

union mips_r_info {
	Elf64_Xword r_info;
	struct {
		Elf64_Word r_sym;		/* Symbol index.  */
		myElf64_Byte r_ssym;		/* Special symbol.  */
		myElf64_Byte r_type3;		/* Third relocation.  */
		myElf64_Byte r_type2;		/* Second relocation.  */
		myElf64_Byte r_type;		/* First relocation.  */
	} r_mips;
};

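/*
 * Example of the layout above: for a 64-bit MIPS relocation the 32-bit
 * symbol index occupies the first word of r_info and the primary
 * relocation type sits in the very last byte, instead of the usual ELF64
 * packing of ELF64_R_INFO(sym, type) = (sym << 32) | type.  The accessors
 * below hide that difference from the generic code.
 */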
499static uint64_t MIPS64_r_sym(Elf64_Rel const *rp)
500{
501 return w(((union mips_r_info){ .r_info = rp->r_info }).r_mips.r_sym);
502}
503
504static void MIPS64_r_info(Elf64_Rel *const rp, unsigned sym, unsigned type)
505{
506 rp->r_info = ((union mips_r_info){
507 .r_mips = { .r_sym = w(sym), .r_type = type }
508 }).r_info;
509}
510
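/*
 * Per-object driver: map the file, pick the byte-order accessors, select
 * the architecture-specific relocation type and nop handling, dispatch to
 * do32()/do64() to build __mcount_loc, and finally write the updated
 * object back out.
 */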
static int do_file(char const *const fname)
{
	unsigned int reltype = 0;
	Elf32_Ehdr *ehdr;
	int rc = -1;

	ehdr = mmap_file(fname);
	if (!ehdr)
		goto out;

	w = w4nat;
	w2 = w2nat;
	w8 = w8nat;
	switch (ehdr->e_ident[EI_DATA]) {
		static unsigned int const endian = 1;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		goto out;
	case ELFDATA2LSB:
		if (*(unsigned char const *)&endian != 1) {
			/* main() is big endian, file.o is little endian. */
			w = w4rev;
			w2 = w2rev;
			w8 = w8rev;
		}
		ideal_nop4_arm = ideal_nop4_arm_le;
		bl_mcount_arm = bl_mcount_arm_le;
		push_arm = push_arm_le;
		ideal_nop2_thumb = ideal_nop2_thumb_le;
		push_bl_mcount_thumb = push_bl_mcount_thumb_le;
		break;
	case ELFDATA2MSB:
		if (*(unsigned char const *)&endian != 0) {
			/* main() is little endian, file.o is big endian. */
			w = w4rev;
			w2 = w2rev;
			w8 = w8rev;
		}
		ideal_nop4_arm = ideal_nop4_arm_be;
		bl_mcount_arm = bl_mcount_arm_be;
		push_arm = push_arm_be;
		ideal_nop2_thumb = ideal_nop2_thumb_be;
		push_bl_mcount_thumb = push_bl_mcount_thumb_be;
		break;
	}	/* end switch */
	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    w2(ehdr->e_type) != ET_REL ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_REL file %s\n", fname);
		goto out;
	}

	gpfx = '_';
	switch (w2(ehdr->e_machine)) {
	default:
		fprintf(stderr, "unrecognized e_machine %u %s\n",
			w2(ehdr->e_machine), fname);
		goto out;
	case EM_386:
		reltype = R_386_32;
		rel_type_nop = R_386_NONE;
		make_nop = make_nop_x86;
		ideal_nop = ideal_nop5_x86_32;
		mcount_adjust_32 = -1;
		gpfx = 0;
		break;
	case EM_ARM:
		reltype = R_ARM_ABS32;
		altmcount = "__gnu_mcount_nc";
		make_nop = make_nop_arm;
		rel_type_nop = R_ARM_NONE;
		is_fake_mcount32 = arm_is_fake_mcount;
		gpfx = 0;
		break;
	case EM_AARCH64:
		reltype = R_AARCH64_ABS64;
		make_nop = make_nop_arm64;
		rel_type_nop = R_AARCH64_NONE;
		ideal_nop = ideal_nop4_arm64;
		is_fake_mcount64 = arm64_is_fake_mcount;
		break;
	case EM_MIPS:	/* reltype: e_class */		break;
	case EM_LOONGARCH:	/* reltype: e_class */	break;
	case EM_PPC:	reltype = R_PPC_ADDR32;		break;
	case EM_PPC64:	reltype = R_PPC64_ADDR64;	break;
	case EM_S390:	/* reltype: e_class */		break;
	case EM_SH:	reltype = R_SH_DIR32; gpfx = 0;	break;
	case EM_SPARCV9: reltype = R_SPARC_64;		break;
	case EM_X86_64:
		make_nop = make_nop_x86;
		ideal_nop = ideal_nop5_x86_64;
		reltype = R_X86_64_64;
		rel_type_nop = R_X86_64_NONE;
		mcount_adjust_64 = -1;
		gpfx = 0;
		break;
	}  /* end switch */

	switch (ehdr->e_ident[EI_CLASS]) {
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		goto out;
	case ELFCLASS32:
		if (w2(ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
		||  w2(ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_REL file: %s\n", fname);
			goto out;
		}
		if (w2(ehdr->e_machine) == EM_MIPS) {
			reltype = R_MIPS_32;
			is_fake_mcount32 = MIPS32_is_fake_mcount;
		}
		if (w2(ehdr->e_machine) == EM_LOONGARCH) {
			reltype = R_LARCH_32;
			is_fake_mcount32 = LARCH32_is_fake_mcount;
		}
		if (do32(ehdr, fname, reltype) < 0)
			goto out;
		break;
	case ELFCLASS64: {
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (w2(ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
		||  w2(ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_REL file: %s\n", fname);
			goto out;
		}
		if (w2(ghdr->e_machine) == EM_S390) {
			reltype = R_390_64;
			mcount_adjust_64 = -14;
		}
		if (w2(ghdr->e_machine) == EM_MIPS) {
			reltype = R_MIPS_64;
			Elf64_r_sym = MIPS64_r_sym;
			Elf64_r_info = MIPS64_r_info;
			is_fake_mcount64 = MIPS64_is_fake_mcount;
		}
		if (w2(ghdr->e_machine) == EM_LOONGARCH) {
			reltype = R_LARCH_64;
			is_fake_mcount64 = LARCH64_is_fake_mcount;
		}
		if (do64(ghdr, fname, reltype) < 0)
			goto out;
		break;
	}
	}  /* end switch */

	rc = write_file(fname);
out:
	file_append_cleanup();
	mmap_cleanup();
	return rc;
}

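/*
 * Usage:
 *	recordmcount [-w] file.o...
 * -w warns about sections that contain mcount calls which are not being
 * recorded.  kernel/trace/ftrace.o itself is skipped below.
 */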
int main(int argc, char *argv[])
{
	const char ftrace[] = "/ftrace.o";
	int ftrace_size = sizeof(ftrace) - 1;
	int n_error = 0;	/* gcc-4.3.0 false positive complaint */
	int c;
	int i;

	while ((c = getopt(argc, argv, "w")) >= 0) {
		switch (c) {
		case 'w':
			warn_on_notrace_sect = 1;
			break;
		default:
			fprintf(stderr, "usage: recordmcount [-w] file.o...\n");
			return 0;
		}
	}

	if ((argc - optind) < 1) {
		fprintf(stderr, "usage: recordmcount [-w] file.o...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = optind; i < argc; i++) {
		char *file = argv[i];
		int len;

		/*
		 * The file kernel/trace/ftrace.o references the mcount
		 * function but does not call it. Since ftrace.o should
		 * not be traced anyway, we just skip it.
		 */
		len = strlen(file);
		if (len >= ftrace_size &&
		    strcmp(file + (len - ftrace_size), ftrace) == 0)
			continue;

		if (do_file(file)) {
			fprintf(stderr, "%s: failed\n", file);
			++n_error;
		}
	}
	return !!n_error;
}