// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

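/*
 * Two queue flavors live in this file: a ptr_ring-backed crypt_queue that
 * is drained by per-CPU workers, and, further down, a lock-free
 * multi-producer, single-consumer (MPSC) queue threaded through the
 * sk_buffs themselves.
 */

/*
 * Allocates one work item per possible CPU, each pointing at the same work
 * function and context pointer, so that queued packet batches can be fanned
 * out to whichever CPUs are available.
 */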
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

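/*
 * Initializes a crypt_queue: a ptr_ring of @len entries serviced by the
 * per-CPU workers allocated above. On ring allocation failure the error is
 * returned directly; on worker allocation failure the ring is cleaned up
 * again, so the caller never has to unwind a partial init.
 */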
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}

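/*
 * Tears down a crypt_queue. With @purge set, packets still sitting in the
 * ring are freed; otherwise the ring must already be empty, which the
 * WARN_ON below asserts. A minimal lifecycle sketch, assuming a
 * hypothetical work function and queue length:
 *
 *	int ret = wg_packet_queue_init(&queue, encrypt_worker, 1024);
 *	if (ret)
 *		return ret;
 *	...
 *	wg_packet_queue_free(&queue, true);
 */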
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

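/*
 * What follows is a lock-free multi-producer, single-consumer (MPSC) queue
 * in the style of Dmitry Vyukov's intrusive node queue. Instead of
 * allocating nodes, it chains the sk_buffs together through their ->prev
 * pointers (aliased as NEXT() below, since the chain grows toward ->head),
 * and it uses the statically embedded `empty` member of struct prev_queue
 * as a permanent stub element, so that the list is never observed with no
 * nodes at all.
 */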
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	/* The stub is only valid if its next/prev fields line up with those
	 * of a real sk_buff, since STUB() casts between the two types. */
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
						  offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
						  offsetof(struct prev_queue, empty));
}

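/*
 * Producer side: publication happens in two steps. First ->head is swung to
 * the new skb with release semantics, then the previous head is linked to
 * it. Between those two writes the chain is momentarily broken, which the
 * consumer must tolerate (see the ->head check in wg_prev_queue_dequeue()).
 */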
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

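/*
 * Bounds the queue at MAX_QUEUED_PACKETS entries: atomic_add_unless()
 * refuses the increment once the limit is hit, and the caller is expected
 * to drop the packet when this returns false.
 */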
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}

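/*
 * Consumer side, only ever called from one context at a time. The stub is
 * skipped over when encountered, and when the final real element is being
 * removed, the stub is re-enqueued first so that ->tail never points at
 * memory about to be handed back to the caller. A NULL return means either
 * that the queue is empty or that a producer has swung ->head but not yet
 * linked its skb in; the consumer simply retries later.
 */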
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	/* tail has no successor yet. If it is not also the head, a producer
	 * is mid-publication, so back off and let the caller retry. */
	if (tail != READ_ONCE(queue->head))
		return NULL;
	/* tail is the last real element; put the stub back in behind it so
	 * that tail can be safely returned and reused. */
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

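/*
 * Usage sketch for the consumer loop (`peer`, its queue member, and
 * process() are hypothetical names here):
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = wg_prev_queue_dequeue(&peer->queue)) != NULL)
 *		process(skb);
 */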

#undef NEXT
#undef STUB