// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */

#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/workqueue.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/tls.h>

#include "tls.h"

static struct workqueue_struct *tls_strp_wq;

static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
	if (strp->stopped)
		return;

	strp->stopped = 1;

	/* Report an error on the lower socket */
	WRITE_ONCE(strp->sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(strp->sk);
}

static void tls_strp_anchor_free(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
	if (!strp->copy_mode)
		shinfo->frag_list = NULL;
	consume_skb(strp->anchor);
	strp->anchor = NULL;
}

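/* Allocate a page-frag backed skb and copy @len bytes starting at @offset
 * of @in_skb into it. Used whenever the parser needs its own copy of the
 * data.
 */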
static struct sk_buff *
tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
		  int offset, int len)
{
	struct sk_buff *skb;
	int i, err;

	skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
				   &err, strp->sk->sk_allocation);
	if (!skb)
		return NULL;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag),
					   skb_frag_size(frag)));
		offset += skb_frag_size(frag);
	}

	skb->len = len;
	skb->data_len = len;
	skb_copy_header(skb, in_skb);
	return skb;
}

/* Create a new skb with the contents of input copied to its page frags */
static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
{
	struct strp_msg *rxm;
	struct sk_buff *skb;

	skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
				strp->stm.full_len);
	if (!skb)
		return NULL;

	rxm = strp_msg(skb);
	rxm->offset = 0;
	return skb;
}

/* Steal the input skb; the input msg is invalid after calling this function */
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;

#ifdef CONFIG_TLS_DEVICE
	DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
#else
	/* This function turns an input into an output,
	 * that can only happen if we have offload.
	 */
	WARN_ON(1);
#endif

	if (strp->copy_mode) {
		struct sk_buff *skb;

		/* Replace anchor with an empty skb, this is a little
		 * dangerous but __tls_cur_msg() warns on empty skbs
		 * so hopefully we'll catch abuses.
		 */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return NULL;

		swap(strp->anchor, skb);
		return skb;
	}

	return tls_strp_msg_make_copy(strp);
}

/* Force the input skb to be in copy mode. The data ownership remains
 * with the input skb itself (meaning unpause will wipe it) but it can
 * be modified.
 */
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;
	struct sk_buff *skb;

	if (strp->copy_mode)
		return 0;

	skb = tls_strp_msg_make_copy(strp);
	if (!skb)
		return -ENOMEM;

	tls_strp_anchor_free(strp);
	strp->anchor = skb;

	tcp_read_done(strp->sk, strp->stm.full_len);
	strp->copy_mode = 1;

	return 0;
}

/* Make a clone (in the skb sense) of the input msg to keep a reference
 * to the underlying data. The reference-holding skbs get placed on
 * @dst.
 */
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	if (strp->copy_mode) {
		struct sk_buff *skb;

		WARN_ON_ONCE(!shinfo->nr_frags);

		/* We can't skb_clone() the anchor, it gets wiped by unpause */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return -ENOMEM;

		__skb_queue_tail(dst, strp->anchor);
		strp->anchor = skb;
	} else {
		struct sk_buff *iter, *clone;
		int chunk, len, offset;

		offset = strp->stm.offset;
		len = strp->stm.full_len;
		iter = shinfo->frag_list;

		while (len > 0) {
			if (iter->len <= offset) {
				offset -= iter->len;
				goto next;
			}

			chunk = iter->len - offset;
			offset = 0;

			clone = skb_clone(iter, strp->sk->sk_allocation);
			if (!clone)
				return -ENOMEM;
			__skb_queue_tail(dst, clone);

			len -= chunk;
next:
			iter = iter->next;
		}
	}

	return 0;
}

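/* Drop everything the anchor accumulated in copy mode: release the page
 * frags, free any skbs copied onto the frag_list, and reset the mode flags.
 */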
static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
	int i;

	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], false);
	shinfo->nr_frags = 0;
	if (strp->copy_mode) {
		kfree_skb_list(shinfo->frag_list);
		shinfo->frag_list = NULL;
	}
	strp->copy_mode = 0;
	strp->mixed_decrypted = 0;
}

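/* Copy-mode input path for data with a uniform decrypted status: append
 * bytes from @in_skb to the anchor's pre-allocated page frags. Once enough
 * bytes for the record header have arrived, determine the full record
 * length and trim any over-read back to the record boundary. Returns the
 * number of bytes consumed, or a negative error.
 */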
static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
				struct sk_buff *in_skb, unsigned int offset,
				size_t in_len)
{
	size_t len, chunk;
	skb_frag_t *frag;
	int sz;

	frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];

	len = in_len;
	/* First make sure we got the header */
	if (!strp->stm.full_len) {
		/* Assume one page is more than enough for headers */
		chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag) +
					   skb_frag_size(frag),
					   chunk));

		skb->len += chunk;
		skb->data_len += chunk;
		skb_frag_size_add(frag, chunk);

		sz = tls_rx_msg_size(strp, skb);
		if (sz < 0)
			return sz;

		/* We may have over-read; sz == 0 means a guaranteed under-read */
		if (unlikely(sz && sz < skb->len)) {
			int over = skb->len - sz;

			WARN_ON_ONCE(over > chunk);
			skb->len -= over;
			skb->data_len -= over;
			skb_frag_size_add(frag, -over);

			chunk -= over;
		}

		frag++;
		len -= chunk;
		offset += chunk;

		strp->stm.full_len = sz;
		if (!strp->stm.full_len)
			goto read_done;
	}

	/* Load up more data */
	while (len && strp->stm.full_len > skb->len) {
		chunk = min_t(size_t, len, strp->stm.full_len - skb->len);
		chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag) +
					   skb_frag_size(frag),
					   chunk));

		skb->len += chunk;
		skb->data_len += chunk;
		skb_frag_size_add(frag, chunk);
		frag++;
		len -= chunk;
		offset += chunk;
	}

read_done:
	return in_len - len;
}

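/* Copy-mode input path for queues that mix decrypted and encrypted data
 * (possible with device offload): keep a private copy of each input skb
 * on the anchor's frag_list so per-skb decrypted status is preserved.
 * first->prev is used as a tail pointer for the list.
 */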
static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
			       struct sk_buff *in_skb, unsigned int offset,
			       size_t in_len)
{
	struct sk_buff *nskb, *first, *last;
	struct skb_shared_info *shinfo;
	size_t chunk;
	int sz;

	if (strp->stm.full_len)
		chunk = strp->stm.full_len - skb->len;
	else
		chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
	chunk = min(chunk, in_len);

	nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
	if (!nskb)
		return -ENOMEM;

	shinfo = skb_shinfo(skb);
	if (!shinfo->frag_list) {
		shinfo->frag_list = nskb;
		nskb->prev = nskb;
	} else {
		first = shinfo->frag_list;
		last = first->prev;
		last->next = nskb;
		first->prev = nskb;
	}

	skb->len += chunk;
	skb->data_len += chunk;

	if (!strp->stm.full_len) {
		sz = tls_rx_msg_size(strp, skb);
		if (sz < 0)
			return sz;

		/* We may have over-read; sz == 0 means a guaranteed under-read */
		if (unlikely(sz && sz < skb->len)) {
			int over = skb->len - sz;

			WARN_ON_ONCE(over > chunk);
			skb->len -= over;
			skb->data_len -= over;
			__pskb_trim(nskb, nskb->len - over);

			chunk -= over;
		}

		strp->stm.full_len = sz;
	}

	return chunk;
}

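/* recv_actor for tcp_read_sock() while in copy mode. Tracks whether the
 * data seen so far has a uniform decrypted status and picks the frag- or
 * skb-based copy-in path accordingly.
 */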
static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
			   unsigned int offset, size_t in_len)
{
	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
	struct sk_buff *skb;
	int ret;

	if (strp->msg_ready)
		return 0;

	skb = strp->anchor;
	if (!skb->len)
		skb_copy_decrypted(skb, in_skb);
	else
		strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
		ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
	else
		ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
	if (ret < 0) {
		desc->error = ret;
		ret = 0;
	}

	if (strp->stm.full_len && strp->stm.full_len == skb->len) {
		desc->count = 0;

		WRITE_ONCE(strp->msg_ready, 1);
		tls_rx_msg_ready(strp);
	}

	return ret;
}

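/* Flush the queued data into the anchor via tcp_read_sock() */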
static int tls_strp_read_copyin(struct tls_strparser *strp)
{
	read_descriptor_t desc;

	desc.arg.data = strp;
	desc.error = 0;
	desc.count = 1; /* give more than one skb per call */

	/* sk should be locked here, so okay to do read_sock */
	tcp_read_sock(strp->sk, &desc, tls_strp_copyin);

	return desc.error;
}

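/* Enter copy mode: back the anchor with freshly allocated pages and copy
 * the queued data in, instead of parsing it in place on the TCP queue.
 */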
static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
{
	struct skb_shared_info *shinfo;
	struct page *page;
	int need_spc, len;

	/* If the rbuf is small or the rcv window has collapsed to 0 we need
	 * to read the data out. Otherwise the connection will stall.
	 * Without memory pressure, a target of INT_MAX will never be ready.
	 */
	if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
		return 0;

	shinfo = skb_shinfo(strp->anchor);
	shinfo->frag_list = NULL;

	/* If we don't know the length, go for max plus a page of cipher overhead */
	need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;

	for (len = need_spc; len > 0; len -= PAGE_SIZE) {
		page = alloc_page(strp->sk->sk_allocation);
		if (!page) {
			tls_strp_flush_anchor_copy(strp);
			return -ENOMEM;
		}

		skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
				   page, 0, 0);
	}

	strp->copy_mode = 1;
	strp->stm.offset = 0;

	strp->anchor->len = 0;
	strp->anchor->data_len = 0;
	strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);

	tls_strp_read_copyin(strp);

	return 0;
}

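/* Check that the record currently under the anchor can be parsed in
 * place from the TCP queue.
 */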
static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
{
	unsigned int len = strp->stm.offset + strp->stm.full_len;
	struct sk_buff *first, *skb;
	u32 seq;

	first = skb_shinfo(strp->anchor)->frag_list;
	skb = first;
	seq = TCP_SKB_CB(first)->seq;

	/* Make sure there's no duplicate data in the queue,
	 * and the decrypted status matches.
	 */
	while (skb->len < len) {
		seq += skb->len;
		len -= skb->len;
		skb = skb->next;

		if (TCP_SKB_CB(skb)->seq != seq)
			return false;
		if (skb_cmp_decrypted(first, skb))
			return false;
	}

	return true;
}

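/* Point the anchor's frag_list at the TCP receive queue without copying
 * and record where within the first skb the message starts.
 */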
static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
	struct tcp_sock *tp = tcp_sk(strp->sk);
	struct sk_buff *first;
	u32 offset;

	first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
	if (WARN_ON_ONCE(!first))
		return;

	/* Bestow the state onto the anchor */
	strp->anchor->len = offset + len;
	strp->anchor->data_len = offset + len;
	strp->anchor->truesize = offset + len;

	skb_shinfo(strp->anchor)->frag_list = first;

	skb_copy_header(strp->anchor, first);
	strp->anchor->destructor = NULL;

	strp->stm.offset = offset;
}

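/* Fill in the strp_msg and tls_msg metadata on the anchor so that upper
 * layers can consume the current record. With @force_refresh the anchor
 * is re-pointed at the TCP queue first, in case it has gone stale.
 */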
void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
	struct strp_msg *rxm;
	struct tls_msg *tlm;

	DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
	DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);

	if (!strp->copy_mode && force_refresh) {
		if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
			return;

		tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
	}

	rxm = strp_msg(strp->anchor);
	rxm->full_len = strp->stm.full_len;
	rxm->offset = strp->stm.offset;
	tlm = tls_msg(strp->anchor);
	tlm->control = strp->mark;
}

/* Called with lock held on lower socket */
static int tls_strp_read_sock(struct tls_strparser *strp)
{
	int sz, inq;

	inq = tcp_inq(strp->sk);
	if (inq < 1)
		return 0;

	if (unlikely(strp->copy_mode))
		return tls_strp_read_copyin(strp);

	if (inq < strp->stm.full_len)
		return tls_strp_read_copy(strp, true);

	if (!strp->stm.full_len) {
		tls_strp_load_anchor_with_queue(strp, inq);

		sz = tls_rx_msg_size(strp, strp->anchor);
		if (sz < 0) {
			tls_strp_abort_strp(strp, sz);
			return sz;
		}

		strp->stm.full_len = sz;

		if (!strp->stm.full_len || inq < strp->stm.full_len)
			return tls_strp_read_copy(strp, true);
	}

	if (!tls_strp_check_queue_ok(strp))
		return tls_strp_read_copy(strp, false);

	WRITE_ONCE(strp->msg_ready, 1);
	tls_rx_msg_ready(strp);

	return 0;
}

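/* Try to parse the next record; on -ENOMEM retry later from the workqueue */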
void tls_strp_check_rcv(struct tls_strparser *strp)
{
	if (unlikely(strp->stopped) || strp->msg_ready)
		return;

	if (tls_strp_read_sock(strp) == -ENOMEM)
		queue_work(tls_strp_wq, &strp->work);
}

/* Lower sock lock held */
void tls_strp_data_ready(struct tls_strparser *strp)
{
	/* This check is needed to synchronize with tls_strp_work().
	 * tls_strp_work() acquires a process lock (lock_sock) whereas
	 * the lock held here is bh_lock_sock. The two locks can be
	 * held by different threads at the same time, but bh_lock_sock
	 * allows a thread in BH context to safely check if the process
	 * lock is held. In this case, if the lock is held, queue work.
	 */
	if (sock_owned_by_user_nocheck(strp->sk)) {
		queue_work(tls_strp_wq, &strp->work);
		return;
	}

	tls_strp_check_rcv(strp);
}

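/* Workqueue callback: retry parsing with the socket lock held */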
static void tls_strp_work(struct work_struct *w)
{
	struct tls_strparser *strp =
		container_of(w, struct tls_strparser, work);

	lock_sock(strp->sk);
	tls_strp_check_rcv(strp);
	release_sock(strp->sk);
}

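/* The upper layers are done with the current record: return its bytes to
 * TCP (or drop our copies) and immediately try to parse the next one.
 */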
void tls_strp_msg_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stm.full_len);

	if (likely(!strp->copy_mode))
		tcp_read_done(strp->sk, strp->stm.full_len);
	else
		tls_strp_flush_anchor_copy(strp);

	WRITE_ONCE(strp->msg_ready, 0);
	memset(&strp->stm, 0, sizeof(strp->stm));

	tls_strp_check_rcv(strp);
}

void tls_strp_stop(struct tls_strparser *strp)
{
	strp->stopped = 1;
}

int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
{
	memset(strp, 0, sizeof(*strp));

	strp->sk = sk;

	strp->anchor = alloc_skb(0, GFP_KERNEL);
	if (!strp->anchor)
		return -ENOMEM;

	INIT_WORK(&strp->work, tls_strp_work);

	return 0;
}

/* strp must already be stopped so that the parser will no longer be
 * invoked (see tls_strp_stop()). Note that tls_strp_done() is not
 * called with the lower socket held.
 */
void tls_strp_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stopped);

	cancel_work_sync(&strp->work);
	tls_strp_anchor_free(strp);
}

int __init tls_strp_dev_init(void)
{
	tls_strp_wq = create_workqueue("tls-strp");
	if (unlikely(!tls_strp_wq))
		return -ENOMEM;

	return 0;
}

void tls_strp_dev_exit(void)
{
	destroy_workqueue(tls_strp_wq);
}
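
/* A rough lifecycle sketch for orientation; the calls are real, but the
 * caller shape below is illustrative (modeled on the tls_sw rx user of
 * this API), not a literal excerpt:
 *
 *	tls_strp_init(&ctx->strp, sk);		// once, at rx setup
 *	...
 *	tls_strp_data_ready(&ctx->strp);	// from sk->sk_data_ready
 *	// tls_rx_msg_ready() fires once a full record has been parsed:
 *	tls_strp_msg_load(&ctx->strp, false);	// expose it via strp_msg()
 *	...					// decrypt / process the record
 *	tls_strp_msg_done(&ctx->strp);		// release it, parse the next
 *	...
 *	tls_strp_stop(&ctx->strp);		// on error or teardown
 *	tls_strp_done(&ctx->strp);		// only after strp is stopped
 */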