/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ELF_H
#define _ASM_X86_ELF_H

/*
 * ELF register definitions.
 */
#include <linux/thread_info.h>

#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/auxvec.h>
#include <asm/fsgsbase.h>

typedef unsigned long elf_greg_t;

#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef struct user_i387_struct elf_fpregset_t;

#ifdef __i386__

typedef struct user_fxsr_struct elf_fpxregset_t;

#define R_386_NONE	0
#define R_386_32	1
#define R_386_PC32	2
#define R_386_GOT32	3
#define R_386_PLT32	4
#define R_386_COPY	5
#define R_386_GLOB_DAT	6
#define R_386_JMP_SLOT	7
#define R_386_RELATIVE	8
#define R_386_GOTOFF	9
#define R_386_GOTPC	10
#define R_386_NUM	11

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS32
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_386

#else

/* x86-64 relocation types */
#define R_X86_64_NONE		0	/* No reloc */
#define R_X86_64_64		1	/* Direct 64 bit */
#define R_X86_64_PC32		2	/* PC relative 32 bit signed */
#define R_X86_64_GOT32		3	/* 32 bit GOT entry */
#define R_X86_64_PLT32		4	/* 32 bit PLT address */
#define R_X86_64_COPY		5	/* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT	6	/* Create GOT entry */
#define R_X86_64_JUMP_SLOT	7	/* Create PLT entry */
#define R_X86_64_RELATIVE	8	/* Adjust by program base */
#define R_X86_64_GOTPCREL	9	/* 32 bit signed pc relative
					   offset to GOT */
#define R_X86_64_32		10	/* Direct 32 bit zero extended */
#define R_X86_64_32S		11	/* Direct 32 bit sign extended */
#define R_X86_64_16		12	/* Direct 16 bit zero extended */
#define R_X86_64_PC16		13	/* 16 bit sign extended pc relative */
#define R_X86_64_8		14	/* Direct 8 bit sign extended */
#define R_X86_64_PC8		15	/* 8 bit sign extended pc relative */
#define R_X86_64_PC64		24	/* Place relative 64-bit signed */

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_X86_64

#endif

#include <asm/vdso.h>

#ifdef CONFIG_X86_64
extern unsigned int vdso64_enabled;
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
extern unsigned int vdso32_enabled;
#endif

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch_ia32(x) \
	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
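/*
 * elf_check_arch_ia32() is used both by the native 32-bit elf_check_arch()
 * below and by the compat loader on 64-bit kernels (compat_elf_check_arch).
 */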

#include <asm/processor.h>

#ifdef CONFIG_X86_32
#include <asm/desc.h>

#define elf_check_arch(x)	elf_check_arch_ia32(x)

/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
   contains a pointer to a function which might be registered using `atexit'.
   This provides a means for the dynamic linker to call DT_FINI functions for
   shared libraries that have been loaded before the code runs.

   A value of 0 tells us we have no such handler.

   We might as well make sure everything else is cleared too (except for %esp),
   just to make things more deterministic.
 */
#define ELF_PLAT_INIT(_r, load_addr)		\
	do {					\
	_r->bx = 0; _r->cx = 0; _r->dx = 0;	\
	_r->si = 0; _r->di = 0; _r->bp = 0;	\
	_r->ax = 0;				\
} while (0)
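
/*
 * Note that the kernel clears %edx here, i.e. it registers no DT_FINI
 * handler itself; the dynamic linker may install its own before jumping
 * to the program entry point.  %eip/%esp and the segment registers are
 * set up afterwards by start_thread().
 */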

/*
 * regs is struct pt_regs, pr_reg is elf_gregset_t (which has the
 * layout of struct user_regs_struct; the two are different)
 */

#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs)	\
do {						\
	pr_reg[0] = regs->bx;			\
	pr_reg[1] = regs->cx;			\
	pr_reg[2] = regs->dx;			\
	pr_reg[3] = regs->si;			\
	pr_reg[4] = regs->di;			\
	pr_reg[5] = regs->bp;			\
	pr_reg[6] = regs->ax;			\
	pr_reg[7] = regs->ds;			\
	pr_reg[8] = regs->es;			\
	pr_reg[9] = regs->fs;			\
	pr_reg[11] = regs->orig_ax;		\
	pr_reg[12] = regs->ip;			\
	pr_reg[13] = regs->cs;			\
	pr_reg[14] = regs->flags;		\
	pr_reg[15] = regs->sp;			\
	pr_reg[16] = regs->ss;			\
} while (0);

#define ELF_CORE_COPY_REGS(pr_reg, regs)	\
do {						\
	ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
	pr_reg[10] = get_user_gs(regs);		\
} while (0);

#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs)	\
do {						\
	ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
	savesegment(gs, pr_reg[10]);		\
} while (0);
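
/*
 * The two variants differ only in how %gs is obtained: ELF_CORE_COPY_REGS
 * is used when dumping a user task and fetches the user %gs via
 * get_user_gs(), while ELF_CORE_COPY_KERNEL_REGS snapshots whatever %gs
 * is currently loaded, which is what a kernel-mode register dump wants.
 */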

#define ELF_PLATFORM	(utsname()->machine)
#define set_personality_64bit()	do { } while (0)

#else /* CONFIG_X86_32 */

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x)			\
	((x)->e_machine == EM_X86_64)

#define compat_elf_check_arch(x)					\
	(elf_check_arch_ia32(x) ||					\
	 (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))

#if __USER32_DS != __USER_DS
# error "The following code assumes __USER32_DS == __USER_DS"
#endif

static inline void elf_common_init(struct thread_struct *t,
				   struct pt_regs *regs, const u16 ds)
{
	/* ax gets execve's return value. */
	/*regs->ax = */ regs->bx = regs->cx = regs->dx = 0;
	regs->si = regs->di = regs->bp = 0;
	regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
	t->fsbase = t->gsbase = 0;
	t->fsindex = t->gsindex = 0;
	t->ds = t->es = ds;
}

#define ELF_PLAT_INIT(_r, load_addr)			\
	elf_common_init(&current->thread, _r, 0)

#define	COMPAT_ELF_PLAT_INIT(regs, load_addr)		\
	elf_common_init(&current->thread, regs, __USER_DS)

void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp);
#define compat_start_thread compat_start_thread

void set_personality_ia32(bool);
#define COMPAT_SET_PERSONALITY(ex)			\
	set_personality_ia32((ex).e_machine == EM_X86_64)
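
/*
 * In the compat path an EM_X86_64 e_machine means an x32 binary (ELFCLASS32
 * with the 64-bit machine type), so the argument to set_personality_ia32()
 * is effectively "is this x32 rather than ia32".
 */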

#define COMPAT_ELF_PLATFORM			("i686")

/*
 * regs is struct pt_regs, pr_reg is elf_gregset_t (which has the
 * layout of struct user_regs_struct; the two are different).  Assumes
 * current is the process getting dumped.
 */

#define ELF_CORE_COPY_REGS(pr_reg, regs)			\
do {								\
	unsigned v;						\
	(pr_reg)[0] = (regs)->r15;				\
	(pr_reg)[1] = (regs)->r14;				\
	(pr_reg)[2] = (regs)->r13;				\
	(pr_reg)[3] = (regs)->r12;				\
	(pr_reg)[4] = (regs)->bp;				\
	(pr_reg)[5] = (regs)->bx;				\
	(pr_reg)[6] = (regs)->r11;				\
	(pr_reg)[7] = (regs)->r10;				\
	(pr_reg)[8] = (regs)->r9;				\
	(pr_reg)[9] = (regs)->r8;				\
	(pr_reg)[10] = (regs)->ax;				\
	(pr_reg)[11] = (regs)->cx;				\
	(pr_reg)[12] = (regs)->dx;				\
	(pr_reg)[13] = (regs)->si;				\
	(pr_reg)[14] = (regs)->di;				\
	(pr_reg)[15] = (regs)->orig_ax;				\
	(pr_reg)[16] = (regs)->ip;				\
	(pr_reg)[17] = (regs)->cs;				\
	(pr_reg)[18] = (regs)->flags;				\
	(pr_reg)[19] = (regs)->sp;				\
	(pr_reg)[20] = (regs)->ss;				\
	(pr_reg)[21] = x86_fsbase_read_cpu();			\
	(pr_reg)[22] = x86_gsbase_read_cpu_inactive();		\
	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
	asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v;	\
} while (0);

/* I'm not sure if we can use '-' here */
#define ELF_PLATFORM       ("x86_64")
extern void set_personality_64bit(void);
extern unsigned int sysctl_vsyscall32;
extern int force_personality32;

#endif /* !CONFIG_X86_32 */

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	4096

/*
 * This is the base location for PIE (ET_DYN with INTERP) loads. On
 * 64-bit, this is above 4GB to leave the entire 32-bit address
 * space open for things that want to use the area for 32-bit pointers.
 */
#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
				 (DEFAULT_MAP_WINDOW / 3 * 2))
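
/*
 * For a 32-bit task this puts ET_DYN binaries at 4 MB.  For a 64-bit task
 * with 4-level paging, DEFAULT_MAP_WINDOW is ((1UL << 47) - PAGE_SIZE), so
 * two thirds of it is roughly 0x5555_5555_4aaa; after the loader's page
 * alignment this is the familiar 0x5555... PIE load address seen in
 * debuggers.
 */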

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports.  This could be done in user space,
   but it's not easy, and we've already done it here.  */

#define ELF_HWCAP		(boot_cpu_data.x86_capability[CPUID_1_EDX])
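
/*
 * i.e. AT_HWCAP carries the CPUID leaf 1 EDX feature bits as seen by the
 * kernel (for example, bit 25 is SSE and bit 26 is SSE2).
 */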

extern u32 elf_hwcap2;

/*
 * HWCAP2 supplies a mask of kernel-enabled CPU features, so that
 * the application can discover that it can safely use them.
 * The bits are defined in uapi/asm/hwcap2.h.
 */
#define ELF_HWCAP2		(elf_hwcap2)
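
/*
 * Both words reach user space via the auxiliary vector; e.g. a program can
 * call getauxval(AT_HWCAP) / getauxval(AT_HWCAP2) to read them.
 */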

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization.  This is more specific in
   intent than poking at uname or /proc/cpuinfo.

   For the moment, we have only optimizations for the Intel generations,
   but that could change... */

#define SET_PERSONALITY(ex) set_personality_64bit()

/*
 * An executable for which elf_read_implies_exec() returns TRUE will
 * have the READ_IMPLIES_EXEC personality flag set automatically.
 */
#define elf_read_implies_exec(ex, executable_stack)	\
	(executable_stack != EXSTACK_DISABLE_X)
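
/*
 * In other words, only binaries whose PT_GNU_STACK header explicitly asks
 * for a non-executable stack avoid READ_IMPLIES_EXEC; legacy binaries
 * without the header (EXSTACK_DEFAULT) keep the old executable semantics.
 */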

struct task_struct;

#define	ARCH_DLINFO_IA32					\
do {								\
	if (VDSO_CURRENT_BASE) {				\
		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);		\
		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);\
	}							\
} while (0)
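
/*
 * AT_SYSINFO is the address of __kernel_vsyscall inside the 32-bit vDSO
 * (the fast system call entry), while AT_SYSINFO_EHDR points at the vDSO's
 * ELF header so the dynamic linker can find its symbols.
 */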

/*
 * True on X86_32 or when emulating IA32 on X86_64
 */
static inline int mmap_is_ia32(void)
{
	return IS_ENABLED(CONFIG_X86_32) ||
	       (IS_ENABLED(CONFIG_COMPAT) &&
		test_thread_flag(TIF_ADDR32));
}
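
/*
 * TIF_ADDR32 is set by set_personality_ia32() for compat (32-bit address
 * space) tasks, so those tasks get the 32-bit mmap layout as well.
 */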

extern unsigned long task_size_32bit(void);
extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);

#ifdef CONFIG_X86_32

#define __STACK_RND_MASK(is32bit) (0x7ff)
#define STACK_RND_MASK (0x7ff)

#define ARCH_DLINFO		ARCH_DLINFO_IA32

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */

#else /* CONFIG_X86_32 */

/* 16GB of stack randomization for 64bit, 8MB for 32bit (masks are in pages) */
#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
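
/*
 * The generic ELF loader turns this into a random stack offset of
 * (random & STACK_RND_MASK) << PAGE_SHIFT, i.e. up to ~8MB for 32-bit
 * tasks and up to ~16GB for 64-bit tasks.
 */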

#define ARCH_DLINFO						\
do {								\
	if (vdso64_enabled)					\
		NEW_AUX_ENT(AT_SYSINFO_EHDR,			\
			    (unsigned long __force)current->mm->context.vdso); \
} while (0)

/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
#define ARCH_DLINFO_X32						\
do {								\
	if (vdso64_enabled)					\
		NEW_AUX_ENT(AT_SYSINFO_EHDR,			\
			    (unsigned long __force)current->mm->context.vdso); \
} while (0)

#define AT_SYSINFO		32

#define COMPAT_ARCH_DLINFO					\
if (test_thread_flag(TIF_X32))					\
	ARCH_DLINFO_X32;					\
else								\
	ARCH_DLINFO_IA32

#define COMPAT_ELF_ET_DYN_BASE	(TASK_UNMAPPED_BASE + 0x1000000)

#endif /* !CONFIG_X86_32 */

#define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)

#define VDSO_ENTRY						\
	((unsigned long)current->mm->context.vdso +		\
	 vdso_image_32.sym___kernel_vsyscall)
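
/*
 * sym___kernel_vsyscall is the build-time offset of __kernel_vsyscall
 * within the 32-bit vDSO image (generated by vdso2c), so VDSO_ENTRY is
 * the entry point's address in the current process.
 */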

struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp);
extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
					      int uses_interp);
#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages

/* Do not change the values. See get_align_mask() */
enum align_flags {
	ALIGN_VA_32	= BIT(0),
	ALIGN_VA_64	= BIT(1),
};

struct va_alignment {
	int flags;
	unsigned long mask;
	unsigned long bits;
} ____cacheline_aligned;

extern struct va_alignment va_align;
extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */