/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Scott Foehner<sfoehner@yahoo.com>,
 * Kevin Chea
 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
 */

#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#include <asm/coprocessor.h>
#include <asm/elf.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	struct user_pt_regs newregs = {
		.pc = regs->pc,
		.ps = regs->ps & ~(1 << PS_EXCM_BIT),
		.lbeg = regs->lbeg,
		.lend = regs->lend,
		.lcount = regs->lcount,
		.sar = regs->sar,
		.threadptr = regs->threadptr,
		.windowbase = regs->windowbase,
		.windowstart = regs->windowstart,
	};

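	/*
	 * The saved areg[] array is stored rotated by windowbase * 4
	 * registers (16 bytes per group of four); copy it out in two
	 * pieces so that newregs.a[] sees the registers in plain order.
	 */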
	memcpy(newregs.a,
	       regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4,
	       regs->windowbase * 16);
	memcpy(newregs.a + regs->windowbase * 4,
	       regs->areg,
	       (WSBITS - regs->windowbase) * 16);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &newregs, 0, -1);
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = {0};
	struct pt_regs *regs;
	const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (newregs.windowbase >= XCHAL_NUM_AREGS / 4)
		return -EINVAL;

	regs = task_pt_regs(target);
	regs->pc = newregs.pc;
	regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask);
	regs->lbeg = newregs.lbeg;
	regs->lend = newregs.lend;
	regs->lcount = newregs.lcount;
	regs->sar = newregs.sar;
	regs->threadptr = newregs.threadptr;

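	/*
	 * If the window state changed, keep regs->wmask consistent with the
	 * new windowbase/windowstart so that the window spill/restore logic
	 * on kernel exit sees a coherent frame state.
	 */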
	if (newregs.windowbase != regs->windowbase ||
	    newregs.windowstart != regs->windowstart) {
		u32 rotws, wmask;

		rotws = (((newregs.windowstart |
			   (newregs.windowstart << WSBITS)) >>
			  newregs.windowbase) &
			 ((1 << WSBITS) - 1)) & ~1;
		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
			(rotws & 0xF) | 1;
		regs->windowbase = newregs.windowbase;
		regs->windowstart = newregs.windowstart;
		regs->wmask = wmask;
	}

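	/* Rotate the supplied a[] array back into the areg[] layout
	 * (the inverse of the copy in gpr_get()).
	 */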
	memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4,
	       newregs.a, newregs.windowbase * 16);
	memcpy(regs->areg, newregs.a + newregs.windowbase * 4,
	       (WSBITS - newregs.windowbase) * 16);

	return 0;
}

static int tie_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	newregs->opt = regs->xtregs_opt;
	newregs->user = ti->xtregs_user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessor registers to memory. */
	coprocessor_flush_all(ti);
	newregs->cp0 = ti->xtregs_cp.cp0;
	newregs->cp1 = ti->xtregs_cp.cp1;
	newregs->cp2 = ti->xtregs_cp.cp2;
	newregs->cp3 = ti->xtregs_cp.cp3;
	newregs->cp4 = ti->xtregs_cp.cp4;
	newregs->cp5 = ti->xtregs_cp.cp5;
	newregs->cp6 = ti->xtregs_cp.cp6;
	newregs->cp7 = ti->xtregs_cp.cp7;
#endif
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  newregs, 0, -1);
	kfree(newregs);
	return ret;
}

static int tie_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 newregs, 0, -1);

	if (ret)
		goto exit;
	regs->xtregs_opt = newregs->opt;
	ti->xtregs_user = newregs->user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessors before we overwrite them. */
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
	ti->xtregs_cp.cp0 = newregs->cp0;
	ti->xtregs_cp.cp1 = newregs->cp1;
	ti->xtregs_cp.cp2 = newregs->cp2;
	ti->xtregs_cp.cp3 = newregs->cp3;
	ti->xtregs_cp.cp4 = newregs->cp4;
	ti->xtregs_cp.cp5 = newregs->cp5;
	ti->xtregs_cp.cp6 = newregs->cp6;
	ti->xtregs_cp.cp7 = newregs->cp7;
#endif
exit:
	kfree(newregs);
	return ret;
}

enum xtensa_regset {
	REGSET_GPR,
	REGSET_TIE,
};

static const struct user_regset xtensa_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_TIE] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(elf_xtregs_t) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = tie_get,
		.set = tie_set,
	},
};

static const struct user_regset_view user_xtensa_view = {
	.name = "xtensa",
	.e_machine = EM_XTENSA,
	.regsets = xtensa_regsets,
	.n = ARRAY_SIZE(xtensa_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_xtensa_view;
}

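/*
 * Single stepping is requested by setting PT_SINGLESTEP here; the flag is
 * acted upon elsewhere in the arch code when the task next returns to user
 * space, so nothing more needs to happen at this point.
 */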
void user_enable_single_step(struct task_struct *child)
{
	child->ptrace |= PT_SINGLESTEP;
}

void user_disable_single_step(struct task_struct *child)
{
	child->ptrace &= ~PT_SINGLESTEP;
}

/*
 * Called by kernel/ptrace.c when detaching to disable single stepping.
 */

void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}

static int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR,
				   0, sizeof(xtensa_gregset_t), uregs);
}

static int ptrace_setregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR,
				     0, sizeof(xtensa_gregset_t), uregs);
}

static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE,
				   0, sizeof(elf_xtregs_t), uregs);
}

static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE,
				     0, sizeof(elf_xtregs_t), uregs);
}

static int ptrace_peekusr(struct task_struct *child, long regno,
			  long __user *ret)
{
	struct pt_regs *regs;
	unsigned long tmp;

	regs = task_pt_regs(child);
	tmp = 0;	/* Default return value. */

	switch (regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		tmp = regs->areg[regno - REG_AR_BASE];
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		tmp = regs->areg[regno - REG_A_BASE];
		break;

	case REG_PC:
		tmp = regs->pc;
		break;

	case REG_PS:
		/* Note: PS.EXCM is not set while user task is running;
		 * its being set in regs is for exception handling
		 * convenience.
		 */
		tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
		break;

	case REG_WB:
		break;		/* tmp = 0 */

	case REG_WS:
	{
		unsigned long wb = regs->windowbase;
		unsigned long ws = regs->windowstart;
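		/* Report windowstart rotated by windowbase so that it is
		 * consistent with REG_WB, which is always reported as 0.
		 */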
		tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
			((1 << WSBITS) - 1);
		break;
	}
	case REG_LBEG:
		tmp = regs->lbeg;
		break;

	case REG_LEND:
		tmp = regs->lend;
		break;

	case REG_LCOUNT:
		tmp = regs->lcount;
		break;

	case REG_SAR:
		tmp = regs->sar;
		break;

	case SYSCALL_NR:
		tmp = regs->syscall;
		break;

	default:
		return -EIO;
	}
	return put_user(tmp, ret);
}

static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
{
	struct pt_regs *regs;
	regs = task_pt_regs(child);

	switch (regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		regs->areg[regno - REG_AR_BASE] = val;
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		regs->areg[regno - REG_A_BASE] = val;
		break;

	case REG_PC:
		regs->pc = val;
		break;

	case SYSCALL_NR:
		regs->syscall = val;
		break;

	default:
		return -EIO;
	}
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

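	/* Find the slot that fired and encode it with the data/instruction
	 * flag in bit 0 (the same encoding as the PTRACE_GETHBPREGS address
	 * argument); the value is reported to the tracer via si_errno.
	 */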
	if (bp->attr.bp_type & HW_BREAKPOINT_X) {
		for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
			if (current->thread.ptrace_bp[i] == bp)
				break;
		i <<= 1;
	} else {
		for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
			if (current->thread.ptrace_wp[i] == bp)
				break;
		i = (i << 1) | 1;
	}

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr = 0;
	attr.bp_len = 1;
	attr.bp_type = type;
	attr.disabled = 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

/*
 * Address bit 0 chooses the instruction (0) or data (1) break register;
 * bits 31..1 are the register number.
 * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
 * address (0) and control (1).
 * Instruction breakpoint control word is 0 to clear a breakpoint, 1 to set.
 * Data breakpoint control word bit 31 is 'trigger on store', bit 30 is
 * 'trigger on load', bits 29..0 are length. Length 0 is used to clear a
 * breakpoint. To set a breakpoint the length must be a power of 2 in the
 * range 1..64 and the address must be length-aligned.
 */
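/*
 * For example, a tracer could set data watchpoint 0 on 4-byte stores to
 * 'addr' along these lines (illustrative sketch only):
 *
 *	uint32_t hbp[2] = { addr, 4 | (1u << 31) };	(address, control)
 *	ptrace(PTRACE_SETHBPREGS, pid, (0 << 1) | 1, hbp);
 */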

static long ptrace_gethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	u32 user_data[2] = {0};
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (dbreak)
		bp = child->thread.ptrace_wp[idx];
	else
		bp = child->thread.ptrace_bp[idx];

	if (bp) {
		user_data[0] = bp->attr.bp_addr;
		user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
		if (dbreak) {
			if (bp->attr.bp_type & HW_BREAKPOINT_R)
				user_data[1] |= DBREAKC_LOAD_MASK;
			if (bp->attr.bp_type & HW_BREAKPOINT_W)
				user_data[1] |= DBREAKC_STOR_MASK;
		}
	}

	if (copy_to_user(datap, user_data, sizeof(user_data)))
		return -EFAULT;

	return 0;
}


static long ptrace_sethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	u32 user_data[2];
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;
	int bp_type = 0;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (copy_from_user(user_data, datap, sizeof(user_data)))
		return -EFAULT;

	if (dbreak) {
		bp = child->thread.ptrace_wp[idx];
		if (user_data[1] & DBREAKC_LOAD_MASK)
			bp_type |= HW_BREAKPOINT_R;
		if (user_data[1] & DBREAKC_STOR_MASK)
			bp_type |= HW_BREAKPOINT_W;
	} else {
		bp = child->thread.ptrace_bp[idx];
		bp_type = HW_BREAKPOINT_X;
	}

	if (!bp) {
		bp = ptrace_hbp_create(child,
				       bp_type ? bp_type : HW_BREAKPOINT_RW);
		if (IS_ERR(bp))
			return PTR_ERR(bp);
		if (dbreak)
			child->thread.ptrace_wp[idx] = bp;
		else
			child->thread.ptrace_bp[idx] = bp;
	}

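	/* Apply the new address/length; a zero length leaves the event
	 * registered but disabled, which is how a breakpoint is cleared.
	 */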
	attr = bp->attr;
	attr.bp_addr = user_data[0];
	attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
	attr.bp_type = bp_type;
	attr.disabled = !attr.bp_len;

	return modify_user_hw_breakpoint(bp, &attr);
}
#endif

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:	/* read register specified by addr. */
		ret = ptrace_peekusr(child, addr, datap);
		break;

	case PTRACE_POKEUSR:	/* write register specified by addr. */
		ret = ptrace_pokeusr(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	case PTRACE_GETXTREGS:
		ret = ptrace_getxregs(child, datap);
		break;

	case PTRACE_SETXTREGS:
		ret = ptrace_setxregs(child, datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr, datap);
		break;

	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr, datap);
		break;
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

void do_syscall_trace_enter(struct pt_regs *regs)
{
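	/* If the tracer asks for the syscall to be aborted, replace the
	 * syscall number with NO_SYSCALL so that it is not dispatched.
	 */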
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		regs->syscall = NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall_get_nr(current, regs));
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	step = test_thread_flag(TIF_SINGLESTEP);

	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}