Thomas Gleixner | b886d83c | 2019-06-01 10:08:55 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 2 | /* |
| 3 | * bpf_jit64.h: BPF JIT compiler for PPC64 |
| 4 | * |
| 5 | * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> |
| 6 | * IBM Corporation |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 7 | */ |
| 8 | #ifndef _BPF_JIT64_H |
| 9 | #define _BPF_JIT64_H |
| 10 | |
| 11 | #include "bpf_jit.h" |
| 12 | |
| 13 | /* |
| 14 | * Stack layout: |
 * Ensure the top half (up to local_tmp_var) stays consistent
| 16 | * with our redzone usage. |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 17 | * |
| 18 | * [ prev sp ] <------------- |
Daniel Borkmann | dbf44daf | 2018-05-04 01:08:21 +0200 | [diff] [blame] | 19 | * [ nv gpr save area ] 6*8 | |
Naveen N. Rao | 7b847f5 | 2016-09-24 02:05:00 +0530 | [diff] [blame] | 20 | * [ tail_call_cnt ] 8 | |
| 21 | * [ local_tmp_var ] 8 | |
 *		fp (r31)	-->	[   ebpf stack space	] up to 512  |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 23 | * [ frame header ] 32/112 | |
| 24 | * sp (r1) ---> [ stack pointer ] -------------- |
| 25 | */ |
| 26 | |
Daniel Borkmann | dbf44daf | 2018-05-04 01:08:21 +0200 | [diff] [blame] | 27 | /* for gpr non volatile registers BPG_REG_6 to 10 */ |
| 28 | #define BPF_PPC_STACK_SAVE (6*8) |
Naveen N. Rao | 7b847f5 | 2016-09-24 02:05:00 +0530 | [diff] [blame] | 29 | /* for bpf JIT code internal usage */ |
| 30 | #define BPF_PPC_STACK_LOCALS 16 |
Sandipan Das | ac0761e | 2017-09-02 00:23:01 +0530 | [diff] [blame] | 31 | /* stack frame excluding BPF stack, ensure this is quadword aligned */ |
| 32 | #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \ |
Naveen N. Rao | 7b847f5 | 2016-09-24 02:05:00 +0530 | [diff] [blame] | 33 | BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE) |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 34 | |
| 35 | #ifndef __ASSEMBLY__ |
| 36 | |
| 37 | /* BPF register usage */ |
Daniel Borkmann | dbf44daf | 2018-05-04 01:08:21 +0200 | [diff] [blame] | 38 | #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) |
| 39 | #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 40 | |
| 41 | /* BPF to ppc register mappings */ |
| 42 | static const int b2p[] = { |
| 43 | /* function return value */ |
| 44 | [BPF_REG_0] = 8, |
| 45 | /* function arguments */ |
| 46 | [BPF_REG_1] = 3, |
| 47 | [BPF_REG_2] = 4, |
| 48 | [BPF_REG_3] = 5, |
| 49 | [BPF_REG_4] = 6, |
| 50 | [BPF_REG_5] = 7, |
| 51 | /* non volatile registers */ |
| 52 | [BPF_REG_6] = 27, |
| 53 | [BPF_REG_7] = 28, |
| 54 | [BPF_REG_8] = 29, |
| 55 | [BPF_REG_9] = 30, |
| 56 | /* frame pointer aka BPF_REG_10 */ |
| 57 | [BPF_REG_FP] = 31, |
| 58 | /* eBPF jit internal registers */ |
Naveen N. Rao | b7b7013 | 2016-09-24 02:05:02 +0530 | [diff] [blame] | 59 | [BPF_REG_AX] = 2, |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 60 | [TMP_REG_1] = 9, |
| 61 | [TMP_REG_2] = 10 |
| 62 | }; |
| 63 | |
Daniel Borkmann | dbf44daf | 2018-05-04 01:08:21 +0200 | [diff] [blame] | 64 | /* PPC NVR range -- update this if we ever use NVRs below r27 */ |
| 65 | #define BPF_PPC_NVR_MIN 27 |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 66 | |
Naveen N. Rao | 86be36f | 2019-03-15 20:21:19 +0530 | [diff] [blame] | 67 | /* |
| 68 | * WARNING: These can use TMP_REG_2 if the offset is not at word boundary, |
| 69 | * so ensure that it isn't in use already. |
| 70 | */ |
| 71 | #define PPC_BPF_LL(r, base, i) do { \ |
| 72 | if ((i) % 4) { \ |
| 73 | PPC_LI(b2p[TMP_REG_2], (i)); \ |
| 74 | PPC_LDX(r, base, b2p[TMP_REG_2]); \ |
| 75 | } else \ |
| 76 | PPC_LD(r, base, i); \ |
| 77 | } while(0) |
| 78 | #define PPC_BPF_STL(r, base, i) do { \ |
| 79 | if ((i) % 4) { \ |
| 80 | PPC_LI(b2p[TMP_REG_2], (i)); \ |
| 81 | PPC_STDX(r, base, b2p[TMP_REG_2]); \ |
| 82 | } else \ |
| 83 | PPC_STD(r, base, i); \ |
| 84 | } while(0) |
| 85 | #define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) |
| 86 | |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 87 | #define SEEN_FUNC 0x1000 /* might call external helpers */ |
| 88 | #define SEEN_STACK 0x2000 /* uses BPF stack */ |
Daniel Borkmann | dbf44daf | 2018-05-04 01:08:21 +0200 | [diff] [blame] | 89 | #define SEEN_TAILCALL 0x4000 /* uses tail calls */ |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 90 | |
| 91 | struct codegen_context { |
| 92 | /* |
| 93 | * This is used to track register usage as well |
| 94 | * as calls to external helpers. |
| 95 | * - register usage is tracked with corresponding |
Daniel Borkmann | dbf44daf | 2018-05-04 01:08:21 +0200 | [diff] [blame] | 96 | * bits (r3-r10 and r27-r31) |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 97 | * - rest of the bits can be used to track other |
| 98 | * things -- for now, we use bits 16 to 23 |
| 99 | * encoded in SEEN_* macros above |
| 100 | */ |
| 101 | unsigned int seen; |
| 102 | unsigned int idx; |
Sandipan Das | ac0761e | 2017-09-02 00:23:01 +0530 | [diff] [blame] | 103 | unsigned int stack_size; |
Naveen N. Rao | 156d0e2 | 2016-06-22 21:55:07 +0530 | [diff] [blame] | 104 | }; |
| 105 | |
| 106 | #endif /* !__ASSEMBLY__ */ |
| 107 | |
| 108 | #endif |