blob: c4bb793ebf0ec1b019fee3df17164190409c4368 [file] [log] [blame]
Pavel Begunkoveb42ceb2022-07-12 21:52:38 +01001#include <linux/kernel.h>
2#include <linux/errno.h>
3#include <linux/file.h>
4#include <linux/slab.h>
5#include <linux/net.h>
6#include <linux/io_uring.h>
7
8#include "io_uring.h"
9#include "notif.h"
Pavel Begunkov68ef5572022-07-12 21:52:41 +010010#include "rsrc.h"
Pavel Begunkoveb42ceb2022-07-12 21:52:38 +010011
Pavel Begunkov42385b02022-11-04 10:59:46 +000012static void io_notif_complete_tw_ext(struct io_kiocb *notif, bool *locked)
Pavel Begunkoveb42ceb2022-07-12 21:52:38 +010013{
Pavel Begunkov14b146b2022-07-27 10:30:41 +010014 struct io_notif_data *nd = io_notif_to_data(notif);
Pavel Begunkoveb42ceb2022-07-12 21:52:38 +010015 struct io_ring_ctx *ctx = notif->ctx;
16
Pavel Begunkov42385b02022-11-04 10:59:46 +000017 if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
18 notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
19
Pavel Begunkov14b146b2022-07-27 10:30:41 +010020 if (nd->account_pages && ctx->user) {
21 __io_unaccount_mem(ctx->user, nd->account_pages);
22 nd->account_pages = 0;
Pavel Begunkove29e3bd2022-07-12 21:52:44 +010023 }
Pavel Begunkov40725d12022-11-04 10:59:45 +000024 io_req_task_complete(notif, locked);
25}
26
Pavel Begunkov7fa8e842022-11-04 10:59:43 +000027static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg,
28 bool success)
Pavel Begunkoveb42ceb2022-07-12 21:52:38 +010029{
Pavel Begunkov14b146b2022-07-27 10:30:41 +010030 struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
31 struct io_kiocb *notif = cmd_to_io_kiocb(nd);
Pavel Begunkoveb42ceb2022-07-12 21:52:38 +010032
Pavel Begunkov40725d12022-11-04 10:59:45 +000033 if (refcount_dec_and_test(&uarg->refcnt))
34 io_req_task_work_add(notif);
35}
36
37static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg,
38 bool success)
39{
40 struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
41
Stefan Metzmachere307e662022-10-27 20:34:45 +020042 if (nd->zc_report) {
43 if (success && !nd->zc_used && skb)
44 WRITE_ONCE(nd->zc_used, true);
45 else if (!success && !nd->zc_copied)
46 WRITE_ONCE(nd->zc_copied, true);
47 }
Pavel Begunkov40725d12022-11-04 10:59:45 +000048 io_tx_ubuf_callback(skb, uarg, success);
49}
Stefan Metzmachere307e662022-10-27 20:34:45 +020050
Pavel Begunkov40725d12022-11-04 10:59:45 +000051void io_notif_set_extended(struct io_kiocb *notif)
52{
53 struct io_notif_data *nd = io_notif_to_data(notif);
54
Pavel Begunkov42385b02022-11-04 10:59:46 +000055 if (nd->uarg.callback != io_tx_ubuf_callback_ext) {
56 nd->account_pages = 0;
57 nd->zc_report = false;
58 nd->zc_used = false;
59 nd->zc_copied = false;
60 nd->uarg.callback = io_tx_ubuf_callback_ext;
61 notif->io_task_work.func = io_notif_complete_tw_ext;
62 }
Pavel Begunkoveb4a2992022-07-12 21:52:39 +010063}
64
/*
 * Allocate and initialise a notification request.  Returns NULL if the
 * per-ctx request cache cannot be refilled.  Caller must hold
 * ctx->uring_lock (enforced by __must_hold below).
 */
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	/* ensure the request cache has at least one free entry */
	if (unlikely(!io_alloc_req_refill(ctx)))
		return NULL;
	notif = io_alloc_req(ctx);
	/* NOTE(review): opcode set to NOP — presumably so generic request
	 * handling treats the notif as inert; confirm against io_uring.c */
	notif->opcode = IORING_OP_NOP;
	notif->flags = 0;
	notif->file = NULL;
	notif->task = current;
	/* take a task reference for the lifetime of this request */
	io_get_task_refs(1);
	notif->rsrc_node = NULL;
	/* default (non-extended) completion path; io_notif_set_extended()
	 * swaps this for the zc-reporting variant */
	notif->io_task_work.func = io_req_task_complete;

	nd = io_notif_to_data(notif);
	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	nd->uarg.callback = io_tx_ubuf_callback;
	/* initial reference; dropped from the ubuf callback once the
	 * network stack is done with the buffer */
	refcount_set(&nd->uarg.refcnt, 1);
	return notif;
}