blob: 191009979bcb0874d27df39dad70be81c9c77175 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0

#include <linux/net.h>
#include <linux/uio.h>

#include "alloc_cache.h"

/*
 * Async state kept for a sendmsg/recvmsg-style request that could not
 * complete inline. NOTE(review): fields exist only under CONFIG_NET, so
 * the struct is empty (zero-sized storage) on !CONFIG_NET builds.
 */
struct io_async_msghdr {
#if defined(CONFIG_NET)
	union {
		/* inline iovec storage, used when free_iov is NULL */
		struct iovec		fast_iov[UIO_FASTIOV];
		struct {
			struct iovec	fast_iov_one;
			__kernel_size_t	controllen;
			int		namelen;
			__kernel_size_t	payloadlen;
		};
		/* recycled via io_netmsg_cache_free() when on the alloc cache */
		struct io_cache_entry	cache;
	};
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
#endif
};
27
Breno Leitaoe1fe7ee2023-02-23 08:43:53 -080028#if defined(CONFIG_NET)
29
Jens Axboef9ead182022-05-25 06:25:13 -060030struct io_async_connect {
31 struct sockaddr_storage address;
32};
33
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);

int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);

int io_send(struct io_kiocb *req, unsigned int issue_flags);
int io_send_prep_async(struct io_kiocb *req);

int io_recvmsg_prep_async(struct io_kiocb *req);
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags);

void io_sendrecv_fail(struct io_kiocb *req);

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_accept(struct io_kiocb *req, unsigned int issue_flags);

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_socket(struct io_kiocb *req, unsigned int issue_flags);

int io_connect_prep_async(struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);

/* zerocopy send variants */
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);

void io_netmsg_cache_free(struct io_cache_entry *entry);
68#else
69static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
70{
71}
Jens Axboef9ead182022-05-25 06:25:13 -060072#endif