| // SPDX-License-Identifier: GPL-2.0-only |
| /* |
| * Testsuite for BPF interpreter and BPF JIT compiler |
| * |
| * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
| */ |
| |
| #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| |
| #include <linux/init.h> |
| #include <linux/module.h> |
| #include <linux/filter.h> |
| #include <linux/bpf.h> |
| #include <linux/skbuff.h> |
| #include <linux/netdevice.h> |
| #include <linux/if_vlan.h> |
| #include <linux/random.h> |
| #include <linux/highmem.h> |
| #include <linux/sched.h> |
| |
| /* General test specific settings */ |
| #define MAX_SUBTESTS 3 |
| #define MAX_TESTRUNS 1000 |
| #define MAX_DATA 128 |
| #define MAX_INSNS 512 |
| #define MAX_K 0xffffFFFF |
| |
| /* Few constants used to init test 'skb' */ |
| #define SKB_TYPE 3 |
| #define SKB_MARK 0x1234aaaa |
| #define SKB_HASH 0x1234aaab |
| #define SKB_QUEUE_MAP 123 |
| #define SKB_VLAN_TCI 0xffff |
| #define SKB_VLAN_PRESENT 1 |
| #define SKB_DEV_IFINDEX 577 |
| #define SKB_DEV_TYPE 588 |
| |
| /* Redefine REGs to make tests less verbose */ |
| #define R0 BPF_REG_0 |
| #define R1 BPF_REG_1 |
| #define R2 BPF_REG_2 |
| #define R3 BPF_REG_3 |
| #define R4 BPF_REG_4 |
| #define R5 BPF_REG_5 |
| #define R6 BPF_REG_6 |
| #define R7 BPF_REG_7 |
| #define R8 BPF_REG_8 |
| #define R9 BPF_REG_9 |
| #define R10 BPF_REG_10 |
| |
/* Flags that can be passed to test cases (stored in struct bpf_test::aux) */
#define FLAG_NO_DATA		BIT(0)
#define FLAG_EXPECTED_FAIL	BIT(1)
#define FLAG_SKB_FRAG		BIT(2)
#define FLAG_VERIFIER_ZEXT	BIT(3)
#define FLAG_LARGE_MEM		BIT(4)

/* Test program flavor; shares the aux byte with the FLAG_* bits above */
enum {
	CLASSIC  = BIT(6),	/* Old BPF instructions only. */
	INTERNAL = BIT(7),	/* Extended instruction set. */
};

/* Extracts the CLASSIC/INTERNAL flavor bits from the aux byte */
#define TEST_TYPE_MASK	(CLASSIC | INTERNAL)
| |
/*
 * Descriptor for a single test case: the program (a static classic or
 * eBPF insn array, or one built at runtime by fill_helper), input data,
 * and per-subtest expected results.
 */
struct bpf_test {
	const char *descr;	/* human-readable test name */
	union {
		struct sock_filter insns[MAX_INSNS];	/* classic BPF program */
		struct bpf_insn insns_int[MAX_INSNS];	/* eBPF program */
		struct {
			void *insns;		/* program built by fill_helper */
			unsigned int len;	/* number of instructions */
		} ptr;
	} u;
	__u8 aux;		/* CLASSIC/INTERNAL flavor plus FLAG_* bits */
	__u8 data[MAX_DATA];	/* input data for the test 'skb' */
	struct {
		int data_size;	/* length of data used by this subtest */
		__u32 result;	/* expected program return value */
	} test[MAX_SUBTESTS];
	int (*fill_helper)(struct bpf_test *self);	/* fills u.ptr at runtime */
	int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
	__u8 frag_data[MAX_DATA];	/* presumably paired with FLAG_SKB_FRAG — confirm at use site */
	int stack_depth; /* for eBPF only, since tests don't call verifier */
	int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */
};
| |
| /* Large test cases need separate allocation and fill handler. */ |
| |
| static int bpf_fill_maxinsns1(struct bpf_test *self) |
| { |
| unsigned int len = BPF_MAXINSNS; |
| struct sock_filter *insn; |
| __u32 k = ~0; |
| int i; |
| |
| insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); |
| if (!insn) |
| return -ENOMEM; |
| |
| for (i = 0; i < len; i++, k--) |
| insn[i] = __BPF_STMT(BPF_RET | BPF_K, k); |
| |
| self->u.ptr.insns = insn; |
| self->u.ptr.len = len; |
| |
| return 0; |
| } |
| |
| static int bpf_fill_maxinsns2(struct bpf_test *self) |
| { |
| unsigned int len = BPF_MAXINSNS; |
| struct sock_filter *insn; |
| int i; |
| |
| insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); |
| if (!insn) |
| return -ENOMEM; |
| |
| for (i = 0; i < len; i++) |
| insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe); |
| |
| self->u.ptr.insns = insn; |
| self->u.ptr.len = len; |
| |
| return 0; |
| } |
| |
/*
 * Builds BPF_MAXINSNS - 1 pseudo-random "A += K" statements followed by
 * a single "return A". The PRNG is seeded with a fixed constant, so the
 * generated program (and thus its result) is deterministic.
 */
static int bpf_fill_maxinsns3(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	struct rnd_state rnd;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* Fixed seed: every invocation emits the same instruction stream */
	prandom_seed_state(&rnd, 3141592653589793238ULL);

	for (i = 0; i < len - 1; i++) {
		__u32 k = prandom_u32_state(&rnd);

		insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k);
	}

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
| static int bpf_fill_maxinsns4(struct bpf_test *self) |
| { |
| unsigned int len = BPF_MAXINSNS + 1; |
| struct sock_filter *insn; |
| int i; |
| |
| insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); |
| if (!insn) |
| return -ENOMEM; |
| |
| for (i = 0; i < len; i++) |
| insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe); |
| |
| self->u.ptr.insns = insn; |
| self->u.ptr.len = len; |
| |
| return 0; |
| } |
| |
/*
 * First insn jumps over a filler body of "return 0xfefefefe" statements
 * straight to the last insn, which returns 0xabababab. Only the first
 * and last instructions ever execute.
 */
static int bpf_fill_maxinsns5(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* Classic BPF offsets are relative to the next insn: 1 + (len - 2)
	 * lands exactly on insn[len - 1].
	 */
	insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);

	for (i = 1; i < len - 1; i++)
		insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/*
 * Program of BPF_MAXINSNS - 1 back-to-back loads of the ancillary
 * "VLAN tag present" word, terminated by "return A".
 */
static int bpf_fill_maxinsns6(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len - 1; i++)
		insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
				     SKF_AD_VLAN_TAG_PRESENT);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/*
 * Loads the ancillary CPU number many times, then subtracts two
 * consecutive CPU reads from each other: the result is 0 whenever both
 * reads observe the same CPU.
 */
static int bpf_fill_maxinsns7(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len - 4; i++)
		insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
				     SKF_AD_CPU);

	insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0);	/* X = A (first CPU read) */
	insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
				   SKF_AD_CPU);
	insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0);	/* A = A - X */
	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/*
 * Loads A with 0xffffffff, then emits a long run of "A > 0xffffffff"
 * comparisons whose (decreasing) true-branch offsets all target the
 * final "return A". The comparison is never true (A equals the
 * constant), so execution falls through every jump.
 */
static int bpf_fill_maxinsns8(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i, jmp_off = len - 3;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff);

	/* jmp_off shrinks by one each insn so each jump would land on
	 * insn[len - 1] if taken.
	 */
	for (i = 1; i < len - 1; i++)
		insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/*
 * eBPF forward/backward jump test: insn 0 jumps to the tail, the tail
 * jumps back to insn 1, which sets R0 = 0xcbababab and exits. The
 * filler MOVs in between never execute.
 */
static int bpf_fill_maxinsns9(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2);		/* -> insn[len - 1] */
	insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab);
	insn[2] = BPF_EXIT_INSN();

	for (i = 3; i < len - 2; i++)
		insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe);

	insn[len - 2] = BPF_EXIT_INSN();
	insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1));	/* -> insn[1] */

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/*
 * Zig-zag chain of unconditional jumps: control bounces between the
 * front and back halves of the jump region (0, hlen-1, 1, hlen-2, ...),
 * until the middle insn finally jumps to the two-insn tail that sets
 * R0 = 0xabababac and exits.
 */
static int bpf_fill_maxinsns10(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS, hlen = len - 2;
	struct bpf_insn *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* Front half: insn i jumps forward to insn hlen - 1 - i */
	for (i = 0; i < hlen / 2; i++)
		insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i);
	/* Back half: insn i jumps backward to insn hlen - i */
	for (i = hlen - 1; i > hlen / 2; i--)
		insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i);

	/* Middle insn jumps out of the chain into the tail at insn[hlen] */
	insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1);
	insn[hlen] = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac);
	insn[hlen + 1] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/*
 * Emits chains of unconditional jumps with period 'plen': within each
 * full chain, insn j jumps to the end of that chain. A shorter
 * remainder chain covers the tail, followed by "return 0xababcbac".
 *
 * NOTE(review): assumes len % plen != 0; otherwise rlen (unsigned)
 * underflows and the remainder loop would write out of bounds. Both
 * current callers satisfy this — confirm before adding new ones.
 */
static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
			 unsigned int plen)
{
	struct sock_filter *insn;
	unsigned int rlen;
	int i, j;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	rlen = (len % plen) - 1;	/* length of the remainder chain */

	for (i = 0; i + plen < len; i += plen)
		for (j = 0; j < plen; j++)
			insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
						 plen - 1 - j, 0, 0);
	/* i now points at the first insn after the last full chain */
	for (j = 0; j < rlen; j++)
		insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
					 0, 0);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/* Maximum-length jump-chain program; stresses JIT pass convergence. */
static int bpf_fill_maxinsns11(struct bpf_test *self)
{
	/* Hits 70 passes on x86_64 and triggers NOPs padding. */
	return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}
| |
| static int bpf_fill_maxinsns12(struct bpf_test *self) |
| { |
| unsigned int len = BPF_MAXINSNS; |
| struct sock_filter *insn; |
| int i = 0; |
| |
| insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); |
| if (!insn) |
| return -ENOMEM; |
| |
| insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0); |
| |
| for (i = 1; i < len - 1; i++) |
| insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0); |
| |
| insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab); |
| |
| self->u.ptr.insns = insn; |
| self->u.ptr.len = len; |
| |
| return 0; |
| } |
| |
/*
 * Long run of LDX_MSH statements (each sets X from the packet's first
 * byte), then A = 0xabababab, A ^= X, return A.
 */
static int bpf_fill_maxinsns13(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len - 3; i++)
		insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);

	insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
	insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/* Small jump-chain program (see __bpf_fill_ja) */
static int bpf_fill_ja(struct bpf_test *self)
{
	/* Hits exactly 11 passes on x86_64 JIT. */
	return __bpf_fill_ja(self, 12, 9);
}
| |
/*
 * Alternates absolute byte loads from packet offset 0 with loads of the
 * ancillary CPU number, terminated by "return 0xbee".
 */
static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len - 1; i += 2) {
		insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
		insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
					 SKF_AD_OFF + SKF_AD_CPU);
	}

	/* Overwrites the second half of the last pair when len is even */
	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
| |
/*
 * Builds: R0 = 1; mem[-40] = 42; then a long run of atomic
 * XADD(mem[-40], R0); finally loads mem[-40] back into R0 and exits,
 * so R0 ends up as 42 plus the number of atomic adds. 'size' selects
 * word or double-word accesses.
 */
static int __bpf_fill_stxdw(struct bpf_test *self, int size)
{
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1);
	insn[1] = BPF_ST_MEM(size, R10, -40, 42);

	for (i = 2; i < len - 2; i++)
		insn[i] = BPF_STX_XADD(size, R10, R0, -40);

	insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40);
	insn[len - 1] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	self->stack_depth = 40;	/* tests bypass the verifier, so set it here */

	return 0;
}
| |
/* Word-sized atomic-add variant of __bpf_fill_stxdw() */
static int bpf_fill_stxw(struct bpf_test *self)
{
	return __bpf_fill_stxdw(self, BPF_W);
}

/* Double-word-sized atomic-add variant of __bpf_fill_stxdw() */
static int bpf_fill_stxdw(struct bpf_test *self)
{
	return __bpf_fill_stxdw(self, BPF_DW);
}
| |
| static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64) |
| { |
| struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)}; |
| |
| memcpy(insns, tmp, sizeof(tmp)); |
| return 2; |
| } |
| |
| /* |
| * Branch conversion tests. Complex operations can expand to a lot |
| * of instructions when JITed. This in turn may cause jump offsets |
| * to overflow the field size of the native instruction, triggering |
| * a branch conversion mechanism in some JITs. |
| */ |
static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
{
	struct bpf_insn *insns;
	int len = S16_MAX + 5;	/* branch spans the maximum eBPF jump offset */
	int i;

	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* R1 is only an operand for the filler ALU ops below */
	i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL);
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	/* Taken: jump directly to the final exit with R0 == 1 */
	insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
	/* Not taken: R0 = 2, exit immediately */
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
	insns[i++] = BPF_EXIT_INSN();

	/*
	 * Filler that never executes; it only inflates the JITed image so
	 * the branch offset can overflow the native jump field. The ops
	 * chosen tend to expand to many native instructions in JITs.
	 */
	while (i < len - 1) {
		static const int ops[] = {
			BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD,
			BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD,
		};
		int op = ops[(i >> 1) % ARRAY_SIZE(ops)];

		/* Alternate 32- and 64-bit variants of each operation */
		if (i & 1)
			insns[i++] = BPF_ALU32_REG(op, R0, R1);
		else
			insns[i++] = BPF_ALU64_REG(op, R0, R1);
	}

	insns[i++] = BPF_EXIT_INSN();	/* target of the max-offset branch */
	self->u.ptr.insns = insns;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}
| |
/* Branch taken by runtime decision: R0 == 1 at the branch */
static int bpf_fill_max_jmp_taken(struct bpf_test *self)
{
	return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
}

/* Branch not taken by runtime decision: R0 != 0 at the branch */
static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
{
	return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
}

/* Branch always taken, known at JIT time (unsigned R0 >= 0) */
static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
{
	return __bpf_fill_max_jmp(self, BPF_JGE, 0);
}

/* Branch never taken, known at JIT time (unsigned R0 < 0 is impossible) */
static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
{
	return __bpf_fill_max_jmp(self, BPF_JLT, 0);
}
| |
/*
 * Reference computation of a single BPF ALU operation on u64 operands,
 * used to derive expected results for generated test programs. Returns
 * false (leaving *res at 0) for division/modulo by zero, where no
 * result is defined; true otherwise.
 *
 * NOTE(review): shift counts are assumed to be < 64; larger values
 * would be undefined behaviour in C. All current callers comply.
 */
static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
{
	*res = 0;
	switch (op) {
	case BPF_MOV:
		*res = v2;
		break;
	case BPF_AND:
		*res = v1 & v2;
		break;
	case BPF_OR:
		*res = v1 | v2;
		break;
	case BPF_XOR:
		*res = v1 ^ v2;
		break;
	case BPF_LSH:
		*res = v1 << v2;
		break;
	case BPF_RSH:
		*res = v1 >> v2;
		break;
	case BPF_ARSH:
		/* Arithmetic shift emulated on an unsigned type: logical
		 * shift, then fill the top v2 bits when v1 is negative as
		 * an s64 (i.e. v1 > S64_MAX).
		 */
		*res = v1 >> v2;
		if (v2 > 0 && v1 > S64_MAX)
			*res |= ~0ULL << (64 - v2);
		break;
	case BPF_ADD:
		*res = v1 + v2;
		break;
	case BPF_SUB:
		*res = v1 - v2;
		break;
	case BPF_MUL:
		*res = v1 * v2;
		break;
	case BPF_DIV:
		if (v2 == 0)
			return false;
		*res = div64_u64(v1, v2);
		break;
	case BPF_MOD:
		if (v2 == 0)
			return false;
		div64_u64_rem(v1, v2, res);
		break;
	}
	return true;
}
| |
/*
 * Test an ALU shift operation for all valid shift values, with either
 * an immediate (BPF_K) or register (BPF_X) shift count. For each test
 * pattern in regs[] and each shift value, the operation's result is
 * compared against a reference from __bpf_alu_result(). The program
 * returns 1 in R0 on success, or exits with R0 == 0 on the first
 * mismatch.
 */
static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
				u8 mode, bool alu32)
{
	static const s64 regs[] = {
		0x0123456789abcdefLL, /* dword > 0, word < 0 */
		0xfedcba9876543210LL, /* dword < 0, word > 0 */
		0xfedcba0198765432LL, /* dword < 0, word < 0 */
		0x0123458967abcdefLL, /* dword > 0, word > 0 */
	};
	int bits = alu32 ? 32 : 64;
	/* Per pattern: 2 (ld_imm64) + 7 insns per shift value;
	 * plus 1 head insn and a 2-insn success tail.
	 */
	int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
	struct bpf_insn *insn;
	int imm, k;
	int i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);	/* failure result */

	for (k = 0; k < ARRAY_SIZE(regs); k++) {
		s64 reg = regs[k];

		i += __bpf_ld_imm64(&insn[i], R3, reg);

		for (imm = 0; imm < bits; imm++) {
			u64 val;

			/* Perform operation */
			insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
			insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
			if (alu32) {
				if (mode == BPF_K)
					insn[i++] = BPF_ALU32_IMM(op, R1, imm);
				else
					insn[i++] = BPF_ALU32_REG(op, R1, R2);

				/* ARSH operates on the sign-extended word */
				if (op == BPF_ARSH)
					reg = (s32)reg;
				else
					reg = (u32)reg;
				__bpf_alu_result(&val, reg, imm, op);
				val = (u32)val;
			} else {
				if (mode == BPF_K)
					insn[i++] = BPF_ALU64_IMM(op, R1, imm);
				else
					insn[i++] = BPF_ALU64_REG(op, R1, R2);
				__bpf_alu_result(&val, reg, imm, op);
			}

			/*
			 * When debugging a JIT that fails this test, one
			 * can write the immediate value to R0 here to find
			 * out which operand values that fail.
			 */

			/* Load reference and check the result */
			i += __bpf_ld_imm64(&insn[i], R4, val);
			insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
			insn[i++] = BPF_EXIT_INSN();
		}
	}

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);	/* success result */
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}
| |
/* Exhaustive shift tests: one wrapper per (width, op, shift-count mode) */
static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
}

static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
}

static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
}

static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
}

static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
}

static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
}

static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
}

static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
}

static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
}

static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
}

static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
}

static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
}
| |
/*
 * Test an ALU register shift operation for all valid shift values
 * for the case when the source and destination are the same register.
 * The shifted value doubles as the shift count; each result is checked
 * against a reference from __bpf_alu_result(). R0 is 1 on success, 0
 * on the first mismatch.
 */
static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
					 bool alu32)
{
	int bits = alu32 ? 32 : 64;
	/* 6 insns per shift value, plus 1 head insn and a 2-insn tail */
	int len = 3 + 6 * bits;
	struct bpf_insn *insn;
	int i = 0;
	u64 val;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);	/* failure result */

	for (val = 0; val < bits; val++) {
		u64 res;

		/* Perform operation */
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
		if (alu32)
			insn[i++] = BPF_ALU32_REG(op, R1, R1);
		else
			insn[i++] = BPF_ALU64_REG(op, R1, R1);

		/* Compute the reference result */
		__bpf_alu_result(&res, val, val, op);
		if (alu32)
			res = (u32)res;
		i += __bpf_ld_imm64(&insn[i], R2, res);

		/* Check the actual result */
		insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
		insn[i++] = BPF_EXIT_INSN();
	}

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);	/* success result */
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}
| |
/* Same-register shift tests: one wrapper per (width, op) */
static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
}

static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
}

static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
}

static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
}

static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
}

static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
}
| |
| /* |
| * Common operand pattern generator for exhaustive power-of-two magnitudes |
| * tests. The block size parameters can be adjusted to increase/reduce the |
 * number of combinations tested and thereby execution speed and memory
| * footprint. |
| */ |
| |
| static inline s64 value(int msb, int delta, int sign) |
| { |
| return sign * (1LL << msb) + delta; |
| } |
| |
/*
 * Drives an emit callback over two operand patterns (see the block
 * comments inside). dbits/sbits bound the destination/source operand
 * magnitudes, block1/block2 size the contiguous value neighborhoods.
 * The emit callback is first probed with insns == NULL to learn its
 * worst-case insn count for buffer sizing.
 */
static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
			      int dbits, int sbits, int block1, int block2,
			      int (*emit)(struct bpf_test*, void*,
					  struct bpf_insn*, s64, s64))
{
	static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
	struct bpf_insn *insns;
	int di, si, bt, db, sb;
	int count, len, k;
	int extra = 1 + 2;	/* head insn plus 2-insn tail */
	int i = 0;

	/* Total number of iterations for the two pattern */
	count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
	count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);

	/* Compute the maximum number of insns and allocate the buffer */
	len = extra + count * (*emit)(self, arg, NULL, 0, 0);
	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* Add head instruction(s) */
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);

	/*
	 * Pattern 1: all combinations of power-of-two magnitudes and sign,
	 * and with a block of contiguous values around each magnitude.
	 */
	for (di = 0; di < dbits - 1; di++)                 /* Dst magnitudes */
		for (si = 0; si < sbits - 1; si++)         /* Src magnitudes */
			for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
				for (db = -(block1 / 2);
				     db < (block1 + 1) / 2; db++)
					for (sb = -(block1 / 2);
					     sb < (block1 + 1) / 2; sb++) {
						s64 dst, src;

						dst = value(di, db, sgn[k][0]);
						src = value(si, sb, sgn[k][1]);
						i += (*emit)(self, arg,
							     &insns[i],
							     dst, src);
					}
	/*
	 * Pattern 2: all combinations for a larger block of values
	 * for each power-of-two magnitude and sign, where the magnitude is
	 * the same for both operands.
	 */
	for (bt = 0; bt < max(dbits, sbits) - 1; bt++)        /* Magnitude */
		for (k = 0; k < ARRAY_SIZE(sgn); k++)         /* Sign combos */
			for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
				for (sb = -(block2 / 2);
				     sb < (block2 + 1) / 2; sb++) {
					s64 dst, src;

					dst = value(bt % dbits, db, sgn[k][0]);
					src = value(bt % sbits, sb, sgn[k][1]);
					i += (*emit)(self, arg, &insns[i],
						     dst, src);
				}

	/* Append tail instructions */
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insns[i++] = BPF_EXIT_INSN();
	BUG_ON(i > len);	/* emit may produce fewer insns than probed */

	self->u.ptr.insns = insns;
	self->u.ptr.len = i;	/* actual length, not the upper bound */

	return 0;
}
| |
| /* |
 * Block size parameters used in pattern tests below. Tune as needed to
 * increase/reduce the number of combinations tested, see following examples.
| * block values per operand MSB |
| * ---------------------------------------- |
| * 0 none |
| * 1 (1 << MSB) |
| * 2 (1 << MSB) + [-1, 0] |
| * 3 (1 << MSB) + [-1, 0, 1] |
| */ |
| #define PATTERN_BLOCK1 1 |
| #define PATTERN_BLOCK2 5 |
| |
| /* Number of test runs for a pattern test */ |
| #define NR_PATTERN_RUNS 1 |
| |
| /* |
| * Exhaustive tests of ALU operations for all combinations of power-of-two |
| * magnitudes of the operands, both for positive and negative values. The |
| * test is designed to verify e.g. the ALU and ALU64 operations for JITs that |
| * emit different code depending on the magnitude of the immediate value. |
| */ |
/*
 * Emits one ALU64-immediate check: compute dst op (s32)imm in R1 and
 * compare against the reference result; exits with R0 == 0 on mismatch.
 * A NULL insns pointer is a sizing probe: returns the worst-case insn
 * count (7) without emitting. Emits nothing for undefined operations
 * (division/modulo by zero).
 */
static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{
	int op = *(int *)arg;
	int i = 0;
	u64 res;

	if (!insns)
		return 7;	/* 2 + 2 ld_imm64 + 3 */

	if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R3, res);
		insns[i++] = BPF_ALU64_IMM(op, R1, imm);
		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
		insns[i++] = BPF_EXIT_INSN();
	}

	return i;
}

/*
 * ALU32-immediate variant: operands and reference are truncated to
 * 32 bits. NULL insns is a sizing probe returning 7.
 */
static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{
	int op = *(int *)arg;
	int i = 0;
	u64 res;

	if (!insns)
		return 7;

	if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
		insns[i++] = BPF_ALU32_IMM(op, R1, imm);
		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
		insns[i++] = BPF_EXIT_INSN();
	}

	return i;
}

/*
 * ALU64 register-operand variant: loads both operands into registers.
 * NULL insns is a sizing probe returning 9 (3 ld_imm64 + 3 insns).
 */
static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;
	int i = 0;
	u64 res;

	if (!insns)
		return 9;

	if (__bpf_alu_result(&res, dst, src, op)) {
		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R2, src);
		i += __bpf_ld_imm64(&insns[i], R3, res);
		insns[i++] = BPF_ALU64_REG(op, R1, R2);
		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
		insns[i++] = BPF_EXIT_INSN();
	}

	return i;
}

/*
 * ALU32 register-operand variant: reference computed on truncated
 * operands. NULL insns is a sizing probe returning 9.
 */
static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;
	int i = 0;
	u64 res;

	if (!insns)
		return 9;

	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R2, src);
		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
		insns[i++] = BPF_ALU32_REG(op, R1, R2);
		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
		insns[i++] = BPF_EXIT_INSN();
	}

	return i;
}

/* Pattern-test dispatchers: pair operand widths with an emit callback */
static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 32,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_alu64_imm);
}

static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 32,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_alu32_imm);
}

static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_alu64_reg);
}

static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_alu32_reg);
}
| |
/* ALU64 immediate operations: one wrapper per BPF ALU opcode */
static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_MOV);
}

static int bpf_fill_alu64_and_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_AND);
}

static int bpf_fill_alu64_or_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_OR);
}

static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_XOR);
}

static int bpf_fill_alu64_add_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_ADD);
}

static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_SUB);
}

static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_MUL);
}

static int bpf_fill_alu64_div_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_DIV);
}

static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_MOD);
}

/* ALU32 immediate operations */
static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_MOV);
}

static int bpf_fill_alu32_and_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_AND);
}

static int bpf_fill_alu32_or_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_OR);
}

static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_XOR);
}

static int bpf_fill_alu32_add_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_ADD);
}

static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_SUB);
}

static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_MUL);
}

static int bpf_fill_alu32_div_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_DIV);
}

static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_MOD);
}

/* ALU64 register operations */
static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_MOV);
}

static int bpf_fill_alu64_and_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_AND);
}

static int bpf_fill_alu64_or_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_OR);
}

static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_XOR);
}

static int bpf_fill_alu64_add_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_ADD);
}

static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_SUB);
}

static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_MUL);
}

static int bpf_fill_alu64_div_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_DIV);
}

static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_MOD);
}

/* ALU32 register operations */
static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_MOV);
}

static int bpf_fill_alu32_and_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_AND);
}

static int bpf_fill_alu32_or_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_OR);
}

static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_XOR);
}

static int bpf_fill_alu32_add_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_ADD);
}

static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_SUB);
}

static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_MUL);
}

static int bpf_fill_alu32_div_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_DIV);
}

static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_MOD);
}
| |
/*
 * Test JITs that implement complex ALU operations as function
 * calls, and must re-arrange operands for argument passing.
 * Performs the same immediate ALU operation with every register
 * R0..R9 as destination and verifies both 32-bit halves of the
 * result. Returns 1 in R0 on success; on failure R0 holds the
 * source line number of the failed check as a diagnostic marker.
 */
static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
{
	int len = 2 + 10 * 10;	/* 10 insns per register plus 2-insn tail */
	struct bpf_insn *insns;
	u64 dst, res;
	int i = 0;
	u32 imm;
	int rd;

	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* Operand and result values according to operation */
	if (alu32)
		dst = 0x76543210U;
	else
		dst = 0x7edcba9876543210ULL;
	imm = 0x01234567U;

	/* Keep shift counts within the 32-bit operand width */
	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
		imm &= 31;

	__bpf_alu_result(&res, dst, imm, op);

	if (alu32)
		res = (u32)res;

	/* Check all operand registers */
	for (rd = R0; rd <= R9; rd++) {
		i += __bpf_ld_imm64(&insns[i], rd, dst);

		if (alu32)
			insns[i++] = BPF_ALU32_IMM(op, rd, imm);
		else
			insns[i++] = BPF_ALU64_IMM(op, rd, imm);

		/* Verify the low 32 bits of the result */
		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
		insns[i++] = BPF_EXIT_INSN();

		/* Shift down and verify the high 32 bits as well */
		insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res >> 32, 2);
		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
		insns[i++] = BPF_EXIT_INSN();
	}

	insns[i++] = BPF_MOV64_IMM(R0, 1);	/* success result */
	insns[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insns;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}
| |
/*
 * ALU64 K (immediate) register tests: one wrapper per opcode, all
 * delegating to __bpf_fill_alu_imm_regs() in 64-bit mode.
 */
static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
}

static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
}

static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
}

static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
}

static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
}

static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
}

static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
}

static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
}

static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
}

static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
}

static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
}

static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
}
| |
/*
 * ALU32 K (immediate) register tests: one wrapper per opcode, all
 * delegating to __bpf_fill_alu_imm_regs() in 32-bit mode.
 */
static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
}

static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
}

static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
}

static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
}

static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
}

static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
}

static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
}

static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
}

static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
}

static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
}

static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
}

static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
}
| |
| /* |
| * Test JITs that implement complex ALU operations as function |
| * calls, and must re-arrange operands for argument passing. |
| */ |
/*
 * Generate a program that applies one ALU register operation (op) to all
 * 10 x 10 combinations of destination and source registers R0..R9, and
 * verifies both 32-bit halves of the result. When rd == rs the operation
 * acts on aliased operands, so a separate reference ("same") is used.
 * Each combination emits exactly 12 instructions (two ld_imm64 = 4, one
 * ALU op, two 4-instruction check sequences), hence len = 2 + 10*10*12.
 * On mismatch the program exits with R0 = __LINE__; on success R0 = 1.
 */
static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
{
	int len = 2 + 10 * 10 * 12;
	u64 dst, src, res, same;
	struct bpf_insn *insns;
	int rd, rs;
	int i = 0;

	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* Operand and result values according to operation */
	if (alu32) {
		dst = 0x76543210U;
		src = 0x01234567U;
	} else {
		dst = 0x7edcba9876543210ULL;
		src = 0x0123456789abcdefULL;
	}

	/* Shift counts must stay below the (32-bit) operand width */
	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
		src &= 31;

	__bpf_alu_result(&res, dst, src, op);
	__bpf_alu_result(&same, src, src, op);

	/* ALU32 results are zero-extended, so the upper halves must be 0 */
	if (alu32) {
		res = (u32)res;
		same = (u32)same;
	}

	/* Check all combinations of operand registers */
	for (rd = R0; rd <= R9; rd++) {
		for (rs = R0; rs <= R9; rs++) {
			/* Aliased operands use the src-op-src reference */
			u64 val = rd == rs ? same : res;

			i += __bpf_ld_imm64(&insns[i], rd, dst);
			i += __bpf_ld_imm64(&insns[i], rs, src);

			if (alu32)
				insns[i++] = BPF_ALU32_REG(op, rd, rs);
			else
				insns[i++] = BPF_ALU64_REG(op, rd, rs);

			/* Low 32 bits of the result */
			insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val, 2);
			insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
			insns[i++] = BPF_EXIT_INSN();

			/* High 32 bits of the result (rd is clobbered here) */
			insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
			insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val >> 32, 2);
			insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
			insns[i++] = BPF_EXIT_INSN();
		}
	}

	insns[i++] = BPF_MOV64_IMM(R0, 1);
	insns[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insns;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}
| |
/*
 * ALU64 X register combination tests: one wrapper per opcode, all
 * delegating to __bpf_fill_alu_reg_pairs() in 64-bit mode.
 */
static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MOV, false);
}

static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_AND, false);
}

static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_OR, false);
}

static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_XOR, false);
}

static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_LSH, false);
}

static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_RSH, false);
}

static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, false);
}

static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_ADD, false);
}

static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_SUB, false);
}

static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MUL, false);
}

static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_DIV, false);
}

static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MOD, false);
}
| |
/*
 * ALU32 X register combination tests: one wrapper per opcode, all
 * delegating to __bpf_fill_alu_reg_pairs() in 32-bit mode.
 */
static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MOV, true);
}

static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_AND, true);
}

static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_OR, true);
}

static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_XOR, true);
}

static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_LSH, true);
}

static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_RSH, true);
}

static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, true);
}

static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_ADD, true);
}

static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_SUB, true);
}

static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MUL, true);
}

static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_DIV, true);
}

static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MOD, true);
}
| |
| /* |
| * Exhaustive tests of atomic operations for all power-of-two operand |
| * magnitudes, both for positive and negative values. |
| */ |
| |
/*
 * Emit one 64-bit atomic test fragment: store dst in a stack slot, apply
 * the atomic op with src, then verify (a) the new memory value, (b) the
 * source register (fetched old value for BPF_FETCH ops, else preserved),
 * and (c) that scratch registers R0/R5 are untouched. A failed check
 * falls through to EXIT. When insns is NULL this is a sizing probe and
 * returns the fragment length: 6 ld_imm64 (2 insns each) + 9 = 21.
 */
static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
			       struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;
	u64 keep, fetch, res;
	int i = 0;

	if (!insns)
		return 21;

	/* Expected memory value after the operation */
	switch (op) {
	case BPF_XCHG:
		res = src;
		break;
	default:
		__bpf_alu_result(&res, dst, src, BPF_OP(op));
	}

	/* Expected source register value after the operation */
	keep = 0x0123456789abcdefULL;
	if (op & BPF_FETCH)
		fetch = dst;	/* Fetch ops return the old memory value */
	else
		fetch = src;	/* Non-fetch ops leave the register alone */

	i += __bpf_ld_imm64(&insns[i], R0, keep);
	i += __bpf_ld_imm64(&insns[i], R1, dst);
	i += __bpf_ld_imm64(&insns[i], R2, src);
	i += __bpf_ld_imm64(&insns[i], R3, res);
	i += __bpf_ld_imm64(&insns[i], R4, fetch);
	i += __bpf_ld_imm64(&insns[i], R5, keep);

	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
	insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8);
	insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8);

	/* Memory must hold the expected result */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
	insns[i++] = BPF_EXIT_INSN();

	/* Source register must hold the fetched/preserved value */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
	insns[i++] = BPF_EXIT_INSN();

	/* Scratch register R0 must be preserved */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}
| |
/*
 * 32-bit counterpart of __bpf_emit_atomic64(): operates on a BPF_W stack
 * slot, with operands and expected values truncated to 32 bits where the
 * architecture zero-extends. Returns 21 as a sizing probe when insns is
 * NULL (6 ld_imm64 x 2 + 9 instructions).
 */
static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
			       struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;
	u64 keep, fetch, res;
	int i = 0;

	if (!insns)
		return 21;

	/* Expected memory value, computed on 32-bit operands */
	switch (op) {
	case BPF_XCHG:
		res = src;
		break;
	default:
		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
	}

	/* Expected source register value after the operation */
	keep = 0x0123456789abcdefULL;
	if (op & BPF_FETCH)
		fetch = (u32)dst;	/* Fetched old value, zero-extended */
	else
		fetch = src;		/* Non-fetch ops leave the register alone */

	i += __bpf_ld_imm64(&insns[i], R0, keep);
	i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
	i += __bpf_ld_imm64(&insns[i], R2, src);
	i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
	i += __bpf_ld_imm64(&insns[i], R4, fetch);
	i += __bpf_ld_imm64(&insns[i], R5, keep);

	insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
	insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4);
	insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4);

	/* Memory must hold the expected result */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
	insns[i++] = BPF_EXIT_INSN();

	/* Source register must hold the fetched/preserved value */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
	insns[i++] = BPF_EXIT_INSN();

	/* Scratch register R0 must be preserved */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}
| |
/*
 * Emit one 64-bit BPF_CMPXCHG test fragment. BPF_CMPXCHG compares R0
 * with the memory operand: R0 starts as ~dst so the first attempt fails
 * (memory unchanged, R0 receives the old value dst), which makes the
 * second attempt succeed (memory becomes src). A failed check exits
 * with R0 = __LINE__. Returns 23 as a sizing probe when insns is NULL
 * (3 ld_imm64 x 2 + 17 instructions).
 */
static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int i = 0;

	if (!insns)
		return 23;

	i += __bpf_ld_imm64(&insns[i], R0, ~dst);
	i += __bpf_ld_imm64(&insns[i], R1, dst);
	i += __bpf_ld_imm64(&insns[i], R2, src);

	/* Result unsuccessful */
	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
	insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);

	/* Memory must be unchanged (still dst) */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2);
	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	/* R0 must now hold the old memory value */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	/* Result successful */
	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
	insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);

	/* Memory must now hold src */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R3, 2);
	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	/* R0 must still hold the old memory value dst */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}
| |
/*
 * 32-bit counterpart of __bpf_emit_cmpxchg64(), operating on a BPF_W
 * stack slot. A BPF_ZEXT_REG follows each cmpxchg because the verifier
 * always zero-extends R0 for 32-bit BPF_CMPXCHG; the test replicates
 * that here since no verifier runs over these programs. Returns 27 as a
 * sizing probe when insns is NULL (4 ld_imm64 x 2 + 19 instructions).
 */
static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int i = 0;

	if (!insns)
		return 27;

	i += __bpf_ld_imm64(&insns[i], R0, ~dst);
	i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
	i += __bpf_ld_imm64(&insns[i], R2, src);

	/* Result unsuccessful */
	insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
	insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
	insns[i++] = BPF_ZEXT_REG(R0), /* Zext always inserted by verifier */
	insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);

	/* Memory must be unchanged (still dst, compared 32-bit) */
	insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	/* R0 must hold the zero-extended old memory value */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	/* Result successful: reload R0 with the expected compare value */
	i += __bpf_ld_imm64(&insns[i], R0, dst);
	insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
	insns[i++] = BPF_ZEXT_REG(R0), /* Zext always inserted by verifier */
	insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);

	/* Memory must now hold src (compared 32-bit) */
	insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	/* R0 must hold the zero-extended old memory value */
	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}
| |
/*
 * Drive the pattern generator over 64/64-bit operand magnitudes for one
 * atomic opcode, using the 64-bit or 32-bit fragment emitter respectively.
 */
static int __bpf_fill_atomic64(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  0, PATTERN_BLOCK2,
				  &__bpf_emit_atomic64);
}

static int __bpf_fill_atomic32(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  0, PATTERN_BLOCK2,
				  &__bpf_emit_atomic32);
}
| |
/*
 * 64-bit atomic operations: one wrapper per opcode (plain, BPF_FETCH
 * variants, XCHG and CMPXCHG), delegating to the shared pattern fillers.
 */
static int bpf_fill_atomic64_add(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_ADD);
}

static int bpf_fill_atomic64_and(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_AND);
}

static int bpf_fill_atomic64_or(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_OR);
}

static int bpf_fill_atomic64_xor(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_XOR);
}

static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH);
}

static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH);
}

static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH);
}

static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH);
}

static int bpf_fill_atomic64_xchg(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_XCHG);
}

static int bpf_fill_cmpxchg64(struct bpf_test *self)
{
	return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
				  &__bpf_emit_cmpxchg64);
}
| |
/*
 * 32-bit atomic operations: one wrapper per opcode (plain, BPF_FETCH
 * variants, XCHG and CMPXCHG), delegating to the shared pattern fillers.
 */
static int bpf_fill_atomic32_add(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_ADD);
}

static int bpf_fill_atomic32_and(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_AND);
}

static int bpf_fill_atomic32_or(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_OR);
}

static int bpf_fill_atomic32_xor(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_XOR);
}

static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH);
}

static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH);
}

static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH);
}

static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH);
}

static int bpf_fill_atomic32_xchg(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_XCHG);
}

static int bpf_fill_cmpxchg32(struct bpf_test *self)
{
	return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
				  &__bpf_emit_cmpxchg32);
}
| |
| /* |
| * Test JITs that implement ATOMIC operations as function calls or |
| * other primitives, and must re-arrange operands for argument passing. |
| */ |
/*
 * Generate a program that runs one atomic operation (op, at width BPF_DW
 * or BPF_W) for all 10 x 10 combinations of destination (address) and
 * source (operand) registers R0..R9, verifying R0, the source register,
 * the destination register and the memory slot afterwards. Register
 * aliasing (rd == rs, or either aliasing R0 for CMPXCHG) suppresses the
 * checks that would be meaningless, so the emitted count i is only
 * bounded by len (note BUG_ON(i > len) and u.ptr.len = i, unlike the
 * other fill helpers which emit exactly len instructions). Failed checks
 * exit with R0 = __LINE__; success exits with R0 = 1.
 */
static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
{
	struct bpf_insn *insn;
	int len = 2 + 34 * 10 * 10;	/* Upper bound: 34 insns per pair */
	u64 mem, upd, res;
	int rd, rs, i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* Operand and memory values */
	if (width == BPF_DW) {
		mem = 0x0123456789abcdefULL;
		upd = 0xfedcba9876543210ULL;
	} else { /* BPF_W */
		mem = 0x01234567U;
		upd = 0x76543210U;
	}

	/* Memory updated according to operation */
	switch (op) {
	case BPF_XCHG:
		res = upd;
		break;
	case BPF_CMPXCHG:
		res = mem;	/* R0 != mem below, so the exchange fails */
		break;
	default:
		__bpf_alu_result(&res, mem, upd, BPF_OP(op));
	}

	/* Test all operand registers */
	for (rd = R0; rd <= R9; rd++) {
		for (rs = R0; rs <= R9; rs++) {
			u64 cmp, src;

			/* Initialize value in memory */
			i += __bpf_ld_imm64(&insn[i], R0, mem);
			insn[i++] = BPF_STX_MEM(width, R10, R0, -8);

			/* Initialize registers in order */
			i += __bpf_ld_imm64(&insn[i], R0, ~mem);
			i += __bpf_ld_imm64(&insn[i], rs, upd);
			insn[i++] = BPF_MOV64_REG(rd, R10);

			/* Perform atomic operation */
			insn[i++] = BPF_ATOMIC_OP(width, op, rd, rs, -8);
			if (op == BPF_CMPXCHG && width == BPF_W)
				insn[i++] = BPF_ZEXT_REG(R0);

			/* Check R0 register value */
			if (op == BPF_CMPXCHG)
				cmp = mem;  /* Expect value from memory */
			else if (R0 == rd || R0 == rs)
				cmp = 0;    /* Aliased, checked below */
			else
				cmp = ~mem; /* Expect value to be preserved */
			if (cmp) {
				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
							  (u32)cmp, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
				insn[i++] = BPF_ALU64_IMM(BPF_RSH, R0, 32);
				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
							  cmp >> 32, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
			}

			/* Check source register value */
			if (rs == R0 && op == BPF_CMPXCHG)
				src = 0; /* Aliased with R0, checked above */
			else if (rs == rd && (op == BPF_CMPXCHG ||
					      !(op & BPF_FETCH)))
				src = 0; /* Aliased with rd, checked below */
			else if (op == BPF_CMPXCHG)
				src = upd; /* Expect value to be preserved */
			else if (op & BPF_FETCH)
				src = mem; /* Expect fetched value from mem */
			else /* no fetch */
				src = upd; /* Expect value to be preserved */
			if (src) {
				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
							  (u32)src, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
				insn[i++] = BPF_ALU64_IMM(BPF_RSH, rs, 32);
				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
							  src >> 32, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
			}

			/* Check destination register value (the address in
			 * rd must be preserved by the operation)
			 */
			if (!(rd == R0 && op == BPF_CMPXCHG) &&
			    !(rd == rs && (op & BPF_FETCH))) {
				insn[i++] = BPF_JMP_REG(BPF_JEQ, rd, R10, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
			}

			/* Check value in memory. When rd == rs the operand
			 * was overwritten by the address (R10), so the
			 * expected memory value must be recomputed.
			 */
			if (rs != rd) { /* No aliasing */
				i += __bpf_ld_imm64(&insn[i], R1, res);
			} else if (op == BPF_XCHG) { /* Aliased, XCHG */
				insn[i++] = BPF_MOV64_REG(R1, R10);
			} else if (op == BPF_CMPXCHG) { /* Aliased, CMPXCHG */
				i += __bpf_ld_imm64(&insn[i], R1, mem);
			} else { /* Aliased, ALU oper */
				i += __bpf_ld_imm64(&insn[i], R1, mem);
				insn[i++] = BPF_ALU64_REG(BPF_OP(op), R1, R10);
			}

			insn[i++] = BPF_LDX_MEM(width, R0, R10, -8);
			if (width == BPF_DW)
				insn[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
			else /* width == BPF_W */
				insn[i++] = BPF_JMP32_REG(BPF_JEQ, R0, R1, 2);
			insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
			insn[i++] = BPF_EXIT_INSN();
		}
	}

	insn[i++] = BPF_MOV64_IMM(R0, 1);
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = i;
	BUG_ON(i > len);

	return 0;
}
| |
/*
 * 64-bit atomic register-combination tests: one wrapper per opcode,
 * delegating to __bpf_fill_atomic_reg_pairs() at BPF_DW width.
 */
static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD);
}

static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND);
}

static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR);
}

static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR);
}

static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD | BPF_FETCH);
}

static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND | BPF_FETCH);
}

static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR | BPF_FETCH);
}

static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR | BPF_FETCH);
}

static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XCHG);
}

static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_CMPXCHG);
}
| |
/*
 * 32-bit atomic register-combination tests: one wrapper per opcode,
 * delegating to __bpf_fill_atomic_reg_pairs() at BPF_W width.
 */
static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD);
}

static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND);
}

static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR);
}

static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR);
}

static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD | BPF_FETCH);
}

static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND | BPF_FETCH);
}

static int bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR | BPF_FETCH);
}

static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR | BPF_FETCH);
}

static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XCHG);
}

static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_CMPXCHG);
}
| |
| /* |
| * Test the two-instruction 64-bit immediate load operation for all |
| * power-of-two magnitudes of the immediate operand. For each MSB, a block |
 * of immediate values centered around the power-of-two MSB is tested,
| * both for positive and negative values. The test is designed to verify |
| * the operation for JITs that emit different code depending on the magnitude |
| * of the immediate value. This is often the case if the native instruction |
| * immediate field width is narrower than 32 bits. |
| */ |
| static int bpf_fill_ld_imm64_magn(struct bpf_test *self) |
| { |
| int block = 64; /* Increase for more tests per MSB position */ |
| int len = 3 + 8 * 63 * block * 2; |
| struct bpf_insn *insn; |
| int bit, adj, sign; |
| int i = 0; |
| |
| insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); |
| if (!insn) |
| return -ENOMEM; |
| |
| insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0); |
| |
| for (bit = 0; bit <= 62; bit++) { |
| for (adj = -block / 2; adj < block / 2; adj++) { |
| for (sign = -1; sign <= 1; sign += 2) { |
| s64 imm = sign * ((1LL << bit) + adj); |
| |
| /* Perform operation */ |
| i += __bpf_ld_imm64(&insn[i], R1, imm); |
| |
| /* Load reference */ |
| insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm); |
| insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, |
| (u32)(imm >> 32)); |
| insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32); |
| insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3); |
| |
| /* Check result */ |
| insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1); |
| insn[i++] = BPF_EXIT_INSN(); |
| } |
| } |
| } |
| |
| insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1); |
| insn[i++] = BPF_EXIT_INSN(); |
| |
| self->u.ptr.insns = insn; |
| self->u.ptr.len = len; |
| BUG_ON(i != len); |
| |
| return 0; |
| } |
| |
| /* |
| * Test the two-instruction 64-bit immediate load operation for different |
| * combinations of bytes. Each byte in the 64-bit word is constructed as |
| * (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG. |
 * All 256 per-byte combinations of the (base1, mask1) and (base2, mask2)
 * patterns are tested.
| */ |
/*
 * Generate a program that tests the 64-bit immediate load for all 256
 * selections of "pattern-1 vs pattern-2" across the 8 bytes of the
 * immediate. Each byte is (baseN & maskN) | (rand & ~maskN), where rand
 * is a deterministic LCG so the sequence is reproducible. The loaded
 * value is compared against a reference built from two 32-bit halves;
 * on mismatch the program exits with R0 = 0, on success with R0 = 1.
 */
static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
				     u8 base1, u8 mask1,
				     u8 base2, u8 mask2)
{
	struct bpf_insn *insn;
	int len = 3 + 8 * BIT(8);	/* 256 patterns x 8 insns + pro/epilogue */
	int pattern, index;
	u32 rand = 1;
	int i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* R0 = 0 is the failure result if any check below bails out */
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);

	for (pattern = 0; pattern < BIT(8); pattern++) {
		u64 imm = 0;

		/* Build the immediate byte by byte, MSB first */
		for (index = 0; index < 8; index++) {
			int byte;

			if (pattern & BIT(index))
				byte = (base1 & mask1) | (rand & ~mask1);
			else
				byte = (base2 & mask2) | (rand & ~mask2);
			imm = (imm << 8) | byte;
		}

		/* Update our LCG */
		rand = rand * 1664525 + 1013904223;

		/* Perform operation */
		i += __bpf_ld_imm64(&insn[i], R1, imm);

		/* Load reference from two 32-bit halves */
		insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
		insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32));
		insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
		insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);

		/* Check result, exit with R0 == 0 on mismatch */
		insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
		insn[i++] = BPF_EXIT_INSN();
	}

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}
| |
/*
 * Byte-pattern selections for the ld_imm64 test: each (base, mask) pair
 * fixes the bits in mask to base and randomizes the rest per byte.
 */
static int bpf_fill_ld_imm64_checker(struct bpf_test *self)
{
	/* Fully fixed bytes: 0x00 vs 0xff checkerboard */
	return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff);
}

static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self)
{
	/* Sign bit clear (positive-ish) vs sign bit set (negative-ish) */
	return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80);
}

static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self)
{
	/* Sign bit clear vs all-zero bytes */
	return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff);
}

static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self)
{
	/* Sign bit set vs all-zero bytes */
	return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff);
}
| |
| /* |
| * Exhaustive tests of JMP operations for all combinations of power-of-two |
| * magnitudes of the operands, both for positive and negative values. The |
| * test is designed to verify e.g. the JMP and JMP32 operations for JITs that |
| * emit different code depending on the magnitude of the immediate value. |
| */ |
| |
| static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op) |
| { |
| switch (op) { |
| case BPF_JSET: |
| return !!(v1 & v2); |
| case BPF_JEQ: |
| return v1 == v2; |
| case BPF_JNE: |
| return v1 != v2; |
| case BPF_JGT: |
| return (u64)v1 > (u64)v2; |
| case BPF_JGE: |
| return (u64)v1 >= (u64)v2; |
| case BPF_JLT: |
| return (u64)v1 < (u64)v2; |
| case BPF_JLE: |
| return (u64)v1 <= (u64)v2; |
| case BPF_JSGT: |
| return v1 > v2; |
| case BPF_JSGE: |
| return v1 >= v2; |
| case BPF_JSLT: |
| return v1 < v2; |
| case BPF_JSLE: |
| return v1 <= v2; |
| } |
| return false; |
| } |
| |
/*
 * Emit one JMP-immediate test fragment: load dst into R1 and execute the
 * conditional jump against imm. If the branch behaves as the reference
 * (__bpf_match_jmp_cond) predicts, control skips the EXIT and falls
 * through to the next fragment; otherwise the program exits early with
 * R0 = match. When insns is NULL this is a sizing probe: 6 instructions
 * at most (MOV + 2-insn ld_imm64 + JMP + optional JA + EXIT).
 */
static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
			      struct bpf_insn *insns, s64 dst, s64 imm)
{
	int op = *(int *)arg;

	if (insns) {
		bool match = __bpf_match_jmp_cond(dst, (s32)imm, op);
		int i = 0;

		insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match);

		i += __bpf_ld_imm64(&insns[i], R1, dst);
		insns[i++] = BPF_JMP_IMM(op, R1, imm, 1);
		/* Expected fall-through: hop over the failure EXIT */
		if (!match)
			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
		insns[i++] = BPF_EXIT_INSN();

		return i;
	}

	return 5 + 1;
}
| |
/*
 * JMP32-immediate counterpart of __bpf_emit_jmp_imm(): the reference
 * condition is evaluated on the sign-extended low 32 bits of both
 * operands. Sizing probe returns 5 (2-insn ld_imm64 + JMP32 + optional
 * JA + EXIT).
 */
static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{
	int op = *(int *)arg;

	if (insns) {
		bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op);
		int i = 0;

		i += __bpf_ld_imm64(&insns[i], R1, dst);
		insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1);
		/* Expected fall-through: hop over the failure EXIT */
		if (!match)
			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
		insns[i++] = BPF_EXIT_INSN();

		return i;
	}

	return 5;
}
| |
/*
 * Register-operand counterpart of __bpf_emit_jmp_imm(): both operands
 * are loaded into registers before the conditional jump. Sizing probe
 * returns 7 (two 2-insn ld_imm64 + JMP + optional JA + EXIT).
 */
static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
			      struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;

	if (insns) {
		bool match = __bpf_match_jmp_cond(dst, src, op);
		int i = 0;

		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R2, src);
		insns[i++] = BPF_JMP_REG(op, R1, R2, 1);
		/* Expected fall-through: hop over the failure EXIT */
		if (!match)
			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
		insns[i++] = BPF_EXIT_INSN();

		return i;
	}

	return 7;
}
| |
| static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg, |
| struct bpf_insn *insns, s64 dst, s64 src) |
| { |
| int op = *(int *)arg; |
| |
| if (insns) { |
| bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op); |
| int i = 0; |
| |
| i += __bpf_ld_imm64(&insns[i], R1, dst); |
| i += __bpf_ld_imm64(&insns[i], R2, src); |
| insns[i++] = BPF_JMP32_REG(op, R1, R2, 1); |
| if (!match) |
| insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1); |
| insns[i++] = BPF_EXIT_INSN(); |
| |
| return i; |
| } |
| |
| return 7; |
| } |
| |
/*
 * Expand a single jump operation into a full pattern test via
 * __bpf_fill_pattern(). The two width arguments select the operand
 * magnitude sweeps (64-bit dst; 32-bit imm or 64-bit src);
 * PATTERN_BLOCK1/PATTERN_BLOCK2 are presumably block-structure
 * parameters defined earlier in this file — see __bpf_fill_pattern().
 */
static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 32,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_jmp_imm);
}

/* Same as above, but for 32-bit (BPF_JMP32) immediate jumps */
static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 32,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_jmp32_imm);
}

/* Same as above, but for 64-bit register-operand jumps */
static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_jmp_reg);
}

/* Same as above, but for 32-bit (BPF_JMP32) register-operand jumps */
static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_jmp32_reg);
}
| |
/*
 * JMP immediate tests: one exhaustive operand-pattern test per 64-bit
 * conditional jump operation, generated through __bpf_fill_jmp_imm().
 */
static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSET);
}

static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JEQ);
}

static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JNE);
}

static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JGT);
}

static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JGE);
}

static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JLT);
}

static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JLE);
}

static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSGT);
}

static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSGE);
}

static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSLT);
}

static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSLE);
}
| |
/*
 * JMP32 immediate tests: one exhaustive operand-pattern test per 32-bit
 * conditional jump operation, generated through __bpf_fill_jmp32_imm().
 */
static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSET);
}

static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JEQ);
}

static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JNE);
}

static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JGT);
}

static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JGE);
}

static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JLT);
}

static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JLE);
}

static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSGT);
}

static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSGE);
}

static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSLT);
}

static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSLE);
}
| |
/*
 * JMP register tests: one exhaustive operand-pattern test per 64-bit
 * conditional jump operation, generated through __bpf_fill_jmp_reg().
 */
static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSET);
}

static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JEQ);
}

static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JNE);
}

static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JGT);
}

static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JGE);
}

static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JLT);
}

static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JLE);
}

static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSGT);
}

static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSGE);
}

static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSLT);
}

static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSLE);
}
| |
/*
 * JMP32 register tests: one exhaustive operand-pattern test per 32-bit
 * conditional jump operation, generated through __bpf_fill_jmp32_reg().
 */
static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSET);
}

static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JEQ);
}

static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JNE);
}

static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JGT);
}

static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JGE);
}

static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JLT);
}

static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JLE);
}

static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSGT);
}

static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSGE);
}

static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSLT);
}

static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSLE);
}
| |
| /* |
| * Set up a sequence of staggered jumps, forwards and backwards with |
| * increasing offset. This tests the conversion of relative jumps to |
| * JITed native jumps. On some architectures, for example MIPS, a large |
| * PC-relative jump offset may overflow the immediate field of the native |
| * conditional branch instruction, triggering a conversion to use an |
| * absolute jump instead. Since this changes the jump offsets, another |
| * offset computation pass is necessary, and that may in turn trigger |
| * another branch conversion. This jump sequence is particularly nasty |
| * in that regard. |
| * |
| * The sequence generation is parameterized by size and jump type. |
| * The size must be even, and the expected result is always size + 1. |
| * Below is an example with size=8 and result=9. |
| * |
| * ________________________Start |
| * R0 = 0 |
| * R1 = r1 |
| * R2 = r2 |
| * ,------- JMP +4 * 3______________Preamble: 4 insns |
| * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------. |
| * | | R0 = 8 | |
| * | | JMP +7 * 3 ------------------------. |
| * | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------. | | |
| * | | | R0 = 6 | | | |
| * | | | JMP +5 * 3 ------------------. | | |
| * | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------. | | | | |
| * | | | | R0 = 4 | | | | | |
| * | | | | JMP +3 * 3 ------------. | | | | |
| * | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--. | | | | | | |
| * | | | | | R0 = 2 | | | | | | | |
| * | | | | | JMP +1 * 3 ------. | | | | | | |
| * | | | | ,--t=====4> if R0 != 0 JMP 4 * 3 + 1 1 2 3 4 5 6 7 8 loc |
| * | | | | | R0 = 1 -1 +2 -3 +4 -5 +6 -7 +8 off |
| * | | | | | JMP -2 * 3 ---' | | | | | | | |
| * | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----' | | | | | | |
| * | | | | | | R0 = 3 | | | | | | |
| * | | | | | | JMP -4 * 3 ---------' | | | | | |
| * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------' | | | | |
| * | | | | | | | R0 = 5 | | | | |
| * | | | | | | | JMP -6 * 3 ---------------' | | | |
| * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------' | | |
| * | | | | | | | | R0 = 7 | | |
| * | | Error | | | JMP -8 * 3 ---------------------' | |
| * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------' |
| * | | | | | | | | | R0 = 9__________________Sequence: 3 * size - 1 insns |
| * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn |
| * |
| */ |
| |
| /* The maximum size parameter */ |
| #define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1) |
| |
| /* We use a reduced number of iterations to get a reasonable execution time */ |
| #define NR_STAGGERED_JMP_RUNS 10 |
| |
/*
 * Build the staggered jump sequence depicted in the diagram above
 * around the given jump instruction.
 *
 * @jmp: template jump instruction, replicated with increasing offsets;
 *       the operand values in @r1/@r2 are chosen by the callers so
 *       that its condition evaluates true.
 * @r1, @r2: values preloaded into R1 and R2 by the preamble.
 *
 * The sequence size is recovered from the test's expected result,
 * which (per the description above) is always size + 1.
 *
 * Returns 0 on success, -ENOMEM if the instruction buffer cannot be
 * allocated. On success the generated program and its length are
 * stored in self->u.ptr.
 */
static int __bpf_fill_staggered_jumps(struct bpf_test *self,
				      const struct bpf_insn *jmp,
				      u64 r1, u64 r2)
{
	int size = self->test[0].result - 1;
	int len = 4 + 3 * (size + 1);
	struct bpf_insn *insns;
	int off, ind;

	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* Preamble: 4 insns, ending with a jump into the middle */
	insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
	insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1);
	insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2);
	insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2);

	/* Sequence: 3-insn groups, visited in staggered order */
	for (ind = 0, off = size; ind <= size; ind++, off -= 2) {
		struct bpf_insn *ins = &insns[4 + 3 * ind];
		int loc;

		/* Offsets alternate around zero; zero itself is skipped */
		if (off == 0)
			off--;

		/* Error check: exit early unless R0 holds the prior step */
		loc = abs(off);
		ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1,
				     3 * (size - ind) + 1);
		ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc);
		ins[2] = *jmp;
		ins[2].off = 3 * (off - 1);
	}

	/* Return */
	insns[len - 1] = BPF_EXIT_INSN();

	self->u.ptr.insns = insns;
	self->u.ptr.len = len;

	return 0;
}
| |
/* 64-bit unconditional jump; R1/R2 are unused by BPF_JA */
static int bpf_fill_staggered_ja(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
}

/*
 * 64-bit immediate jumps. In each case R1 is chosen so that the
 * condition against the immediate is true and the jump is taken.
 */
static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
}

static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
}

static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
}

static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}

static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}

static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}

static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}
| |
/*
 * 64-bit register jumps. In each case R1 and R2 are chosen so that
 * the condition is true and the jump is taken.
 */
static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
}

static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
}

static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
}

static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
}

static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
}

static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
}

static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
}

static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
}
| |
/*
 * 32-bit (BPF_JMP32) immediate jumps. In each case R1 is chosen so
 * that the condition against the immediate is true and the jump is
 * taken.
 */
static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
}

static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
}

static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
}

static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
| |