// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

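/*
 * Allocates a multicore_worker per possible CPU, each with its work item
 * initialized to run @function and its context pointer set to @ptr.
 */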
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

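/*
 * Initializes the shared ptr_ring of @len slots and the per-CPU workers that
 * service it, passing @queue itself as each worker's context pointer. If the
 * workers cannot be allocated, the ring is cleaned up again.
 */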
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}

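/*
 * Frees the per-CPU workers and the ring. If @purge, any packets still in the
 * ring are dropped; otherwise the ring is expected to already be empty.
 */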
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

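/*
 * The prev_queue below is an intrusive, lock-free MPSC queue (multiple
 * producers, a single consumer), seemingly in the spirit of Dmitry Vyukov's
 * node-based MPSC design. Rather than allocating nodes, the next pointer of
 * the singly-linked list is stored in skb->prev (NEXT()), and queue->empty,
 * whose layout matches the first two pointers of struct sk_buff (checked in
 * wg_prev_queue_init()), serves as the stub element (STUB()).
 */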
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
							offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
							offsetof(struct prev_queue, empty));
}

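/*
 * Producer-side link step, safe to run concurrently from multiple CPUs: clear
 * the new element's next pointer, atomically swing queue->head to it with
 * release semantics, then point the previous head at it. Between the xchg and
 * the final write the chain ends early at a NULL next pointer, which the
 * consumer treats as "nothing more to take yet".
 */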
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

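/*
 * Bounded enqueue: fails once MAX_QUEUED_PACKETS packets are already queued,
 * so producers cannot grow the queue without limit.
 */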
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}

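/*
 * Single-consumer dequeue. The stub is skipped over when it reaches the tail,
 * and an element is only returned once its successor is visible, so a packet
 * that a producer may still be linking behind is never handed out. When the
 * tail is the last real element (next is NULL but tail equals head), the stub
 * is re-enqueued behind it so that element can be released on this pass. A
 * NULL return means the queue is empty or a producer is mid-enqueue; the
 * caller can simply try again later.
 */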
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	if (tail != READ_ONCE(queue->head))
		return NULL;
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

#undef NEXT
#undef STUB