// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
#include <trace/events/sock.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

static int param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int set_params(const char *str, const struct kernel_param *kp)
{
	return param_store_val(str, kp->arg, 0, INT_MAX);
}

static const struct kernel_param_ops set_param_ops = {
	.set = set_params,
	.get = param_get_int,
};

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
 */
static int idle_poll_period_usecs;
device_param_cb(idle_poll_period_usecs, &set_param_ops,
		&idle_poll_period_usecs, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs: Default 0");
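
/*
 * Example usage (illustrative only): both parameters can be set at module
 * load time or, since they are registered with 0644 permissions, adjusted
 * at runtime through sysfs:
 *
 *   modprobe nvmet-tcp so_priority=1 idle_poll_period_usecs=50
 *   echo 50 > /sys/module/nvmet_tcp/parameters/idle_poll_period_usecs
 */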

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	struct msghdr			recv_msg;
	struct bio_vec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	unsigned long		poll_end;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

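/*
 * Compute the crc32c header digest over @len bytes of @pdu and place the
 * 32-bit result directly after the header bytes (at pdu + len).
 */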
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->iov = NULL;
	cmd->req.sg = NULL;
}

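/*
 * Map the not-yet-received part of the command payload (starting at
 * cmd->rbytes_done) from the request scatterlist into cmd->iov and point
 * recv_msg's iterator at it, so inline or H2CData bytes are received
 * straight into the request pages.
 */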
static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct bio_vec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;
	int nr_pages;

	length = cmd->pdu_len;
	nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		bvec_set_page(iov, sg_page(sg), sg->length,
				sg->offset + sg_offset);

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
		nr_pages, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

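/*
 * Translate the SGL descriptor in the command capsule into a scatterlist
 * for the request; for writes that carry host data an iovec array is also
 * allocated for the socket receive path. In-capsule (inline) data is
 * bounded by the port's inline_data_size.
 */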
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	nvmet_tcp_free_cmd_buffers(cmd);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

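/*
 * Prepare a C2HData PDU carrying the read payload back to the host. When
 * SQ head updates are disabled the PDU also signals command success, and
 * the data digest is computed here when enabled.
 */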
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}

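/*
 * Transmit path: each nvmet_try_send_* helper below pushes one piece of a
 * response (data PDU header, payload pages, R2T, data digest or response
 * capsule) onto the socket and returns -EAGAIN when the piece could only
 * be sent partially, so io_work can retry later.
 */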
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
	};
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct msghdr msg = {
			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
		};
		struct page *page = sg_page(cmd->cur_sg);
		struct bio_vec bvec;
		u32 left = cmd->cur_sg->length - cmd->offset;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			msg.msg_flags |= MSG_MORE;

		bvec_set_page(&bvec, page, left, cmd->offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
		ret = sock_sendmsg(cmd->queue->sock, &msg);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled)
		nvmet_tcp_free_cmd_buffers(cmd);

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	nvmet_tcp_free_cmd_buffers(cmd);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

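/*
 * Allocate the crc32c transform plus one ahash request per direction;
 * only needed when the host enabled header and/or data digests in ICReq.
 */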
static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

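/*
 * Handle connection initialization: validate the ICReq PDU, record the
 * negotiated digest settings, and reply with an ICResp before moving the
 * queue to the LIVE state.
 */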
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	/*
	 * This command has not been processed yet, hence we are trying to
	 * figure out if there is still pending data left to receive. If
	 * we don't, we can simply prepare for the next pdu and bail out,
	 * otherwise we will need to prepare a buffer and receive the
	 * stale data before continuing forward.
	 */
	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_build_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	if (likely(queue->nr_cmds)) {
		if (unlikely(data->ttag >= queue->nr_cmds)) {
			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
				queue->idx, data->ttag, queue->nr_cmds);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		cmd = &queue->cmds[data->ttag];
	} else {
		cmd = &queue->connect;
	}

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_build_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (unlikely(hdr->type == nvme_tcp_icreq)) {
		pr_err("queue %d: received icreq pdu in state %d\n",
			queue->idx, queue->state);
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_build_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

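/*
 * Receive path: read the common PDU header first, then the rest of the
 * header based on its hlen (plus header digest if negotiated), verify the
 * digests and hand the completed PDU to nvmet_tcp_done_recv_pdu().
 */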
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_req_uninit(&cmd->req);
		nvmet_tcp_free_cmd_buffers(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		queue_work(nvmet_wq, &queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{
	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
}

static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
		int ops)
{
	if (!idle_poll_period_usecs)
		return false;

	if (ops)
		nvmet_tcp_arm_queue_deadline(queue);

	return !time_after(jiffies, queue->poll_end);
}

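/*
 * Main per-queue worker: alternate bounded receive and send passes until
 * no more progress is made or the overall budget is exhausted, then
 * requeue itself while work is pending or an armed idle-poll deadline has
 * not yet expired.
 */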
static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * Requeue the worker if idle deadline period is in progress or any
	 * ops activity was recorded during the do-while loop above.
	 */
	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}

static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_req_uninit(&cmd->req);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_req_uninit(&queue->connect.req);
	}
}

static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_free_cmd_buffers(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
		nvmet_tcp_free_cmd_buffers(&queue->connect);
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct page *page;
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	cancel_work_sync(&queue->io_work);
	/* stop accepting incoming data */
	queue->rcv_state = NVMET_TCP_RECV_ERR;

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	nvmet_tcp_free_cmd_data_in_buffers(queue);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_free(&nvmet_tcp_queue_ida, queue->idx);

	page = virt_to_head_page(queue->pf_cache.va);
	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
	kfree(queue);
}

static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		break;
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	ret = 0;
	write_lock_bh(&sock->sk->sk_callback_lock);
	if (sock->sk->sk_state != TCP_ESTABLISHED) {
		/*
		 * If the socket is already closing, don't even start
		 * consuming it
		 */
		ret = -ENOTCONN;
	} else {
		sock->sk->sk_user_data = queue;
		queue->data_ready = sock->sk->sk_data_ready;
		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
		queue->state_change = sock->sk->sk_state_change;
		sock->sk->sk_state_change = nvmet_tcp_state_change;
		queue->write_space = sock->sk->sk_write_space;
		sock->sk->sk_write_space = nvmet_tcp_write_space;
		if (idle_poll_period_usecs)
			nvmet_tcp_arm_queue_deadline(queue);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return ret;
}

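/*
 * Set up a queue for a freshly accepted socket: reserve an index, prepare
 * the initial connect command and nvmet submission queue, and install the
 * socket callbacks that drive io_work.
 */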
1624static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1625 struct socket *newsock)
1626{
1627 struct nvmet_tcp_queue *queue;
1628 int ret;
1629
1630 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1631 if (!queue)
1632 return -ENOMEM;
1633
1634 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1635 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1636 queue->sock = newsock;
1637 queue->port = port;
1638 queue->nr_cmds = 0;
1639 spin_lock_init(&queue->state_lock);
1640 queue->state = NVMET_TCP_Q_CONNECTING;
1641 INIT_LIST_HEAD(&queue->free_list);
1642 init_llist_head(&queue->resp_list);
1643 INIT_LIST_HEAD(&queue->resp_send_list);
1644
Sagi Grimberg44f331a2022-02-14 11:07:32 +02001645 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001646 if (queue->idx < 0) {
1647 ret = queue->idx;
1648 goto out_free_queue;
1649 }
1650
1651 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1652 if (ret)
1653 goto out_ida_remove;
1654
1655 ret = nvmet_sq_init(&queue->nvme_sq);
1656 if (ret)
1657 goto out_free_connect;
1658
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001659 nvmet_prepare_receive_pdu(queue);
1660
1661 mutex_lock(&nvmet_tcp_queue_mutex);
1662 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1663 mutex_unlock(&nvmet_tcp_queue_mutex);
1664
1665 ret = nvmet_tcp_set_queue_sock(queue);
1666 if (ret)
1667 goto out_destroy_sq;
1668
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001669 return 0;
1670out_destroy_sq:
1671 mutex_lock(&nvmet_tcp_queue_mutex);
1672 list_del_init(&queue->queue_list);
1673 mutex_unlock(&nvmet_tcp_queue_mutex);
1674 nvmet_sq_destroy(&queue->nvme_sq);
1675out_free_connect:
1676 nvmet_tcp_free_cmd(&queue->connect);
1677out_ida_remove:
Sagi Grimberg44f331a2022-02-14 11:07:32 +02001678 ida_free(&nvmet_tcp_queue_ida, queue->idx);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001679out_free_queue:
1680 kfree(queue);
1681 return ret;
1682}
1683
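/*
 * Accept loop, run from nvmet_wq: drain every pending connection on the
 * listening socket (kernel_accept() with O_NONBLOCK returns -EAGAIN once
 * the backlog is empty) and allocate a queue for each new socket.
 */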
1684static void nvmet_tcp_accept_work(struct work_struct *w)
1685{
1686 struct nvmet_tcp_port *port =
1687 container_of(w, struct nvmet_tcp_port, accept_work);
1688 struct socket *newsock;
1689 int ret;
1690
1691 while (true) {
1692 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1693 if (ret < 0) {
1694 if (ret != -EAGAIN)
1695 pr_warn("failed to accept err=%d\n", ret);
1696 return;
1697 }
1698 ret = nvmet_tcp_alloc_queue(port, newsock);
1699 if (ret) {
1700 pr_err("failed to allocate queue\n");
1701 sock_release(newsock);
1702 }
1703 }
1704}
1705
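/*
 * data_ready callback of the listening socket: a new connection is waiting
 * in the accept backlog, so schedule the port's accept work.
 */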
1706static void nvmet_tcp_listen_data_ready(struct sock *sk)
1707{
1708 struct nvmet_tcp_port *port;
1709
Peilin Ye40e0b092023-01-19 16:45:16 -08001710 trace_sk_data_ready(sk);
1711
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001712 read_lock_bh(&sk->sk_callback_lock);
1713 port = sk->sk_user_data;
1714 if (!port)
1715 goto out;
1716
1717 if (sk->sk_state == TCP_LISTEN)
Sagi Grimberg8832cf92022-03-21 13:57:27 +02001718 queue_work(nvmet_wq, &port->accept_work);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001719out:
1720 read_unlock_bh(&sk->sk_callback_lock);
1721}
1722
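/*
 * Bring up a listening socket for an NVMe-oF TCP port: parse the configured
 * discovery address, create the socket, take over its data_ready callback,
 * apply socket options (SO_REUSEADDR, TCP_NODELAY, optional priority), then
 * bind and listen.
 */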
1723static int nvmet_tcp_add_port(struct nvmet_port *nport)
1724{
1725 struct nvmet_tcp_port *port;
1726 __kernel_sa_family_t af;
Christoph Hellwig12abc5e2020-05-28 07:12:19 +02001727 int ret;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001728
1729 port = kzalloc(sizeof(*port), GFP_KERNEL);
1730 if (!port)
1731 return -ENOMEM;
1732
1733 switch (nport->disc_addr.adrfam) {
1734 case NVMF_ADDR_FAMILY_IP4:
1735 af = AF_INET;
1736 break;
1737 case NVMF_ADDR_FAMILY_IP6:
1738 af = AF_INET6;
1739 break;
1740 default:
1741 pr_err("address family %d not supported\n",
1742 nport->disc_addr.adrfam);
1743 ret = -EINVAL;
1744 goto err_port;
1745 }
1746
1747 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1748 nport->disc_addr.trsvcid, &port->addr);
1749 if (ret) {
1750 pr_err("malformed ip/port passed: %s:%s\n",
1751 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1752 goto err_port;
1753 }
1754
1755 port->nport = nport;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001756 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
1757 if (port->nport->inline_data_size < 0)
1758 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1759
1760 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1761 IPPROTO_TCP, &port->sock);
1762 if (ret) {
1763 pr_err("failed to create a socket\n");
1764 goto err_port;
1765 }
1766
1767 port->sock->sk->sk_user_data = port;
1768 port->data_ready = port->sock->sk->sk_data_ready;
1769 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
Christoph Hellwigb58f0e82020-05-28 07:12:09 +02001770 sock_set_reuseaddr(port->sock->sk);
Christoph Hellwig12abc5e2020-05-28 07:12:19 +02001771 tcp_sock_set_nodelay(port->sock->sk);
Christoph Hellwig6e434962020-05-28 07:12:11 +02001772 if (so_priority > 0)
1773 sock_set_priority(port->sock->sk, so_priority);
Wunderlich, Mark43cc6682020-01-16 00:46:16 +00001774
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001775 ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1776 sizeof(port->addr));
1777 if (ret) {
1778 pr_err("failed to bind port socket %d\n", ret);
1779 goto err_sock;
1780 }
1781
1782 ret = kernel_listen(port->sock, 128);
1783 if (ret) {
1784 pr_err("failed to listen %d on port sock\n", ret);
1785 goto err_sock;
1786 }
1787
1788 nport->priv = port;
1789 pr_info("enabling port %d (%pISpc)\n",
1790 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1791
1792 return 0;
1793
1794err_sock:
1795 sock_release(port->sock);
1796err_port:
1797 kfree(port);
1798 return ret;
1799}
1800
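/*
 * Shut down every queue still associated with this port; the resulting
 * socket state changes schedule the per-queue release work.
 */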
Israel Rukshin2351ead2021-10-06 08:09:45 +00001801static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
1802{
1803 struct nvmet_tcp_queue *queue;
1804
1805 mutex_lock(&nvmet_tcp_queue_mutex);
1806 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1807 if (queue->port == port)
1808 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1809 mutex_unlock(&nvmet_tcp_queue_mutex);
1810}
1811
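/*
 * Tear down a port: restore the listening socket's callbacks so no further
 * accepts are scheduled, cancel any accept work in flight, destroy queues
 * that never made it to a controller, and release the socket.
 */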
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001812static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1813{
1814 struct nvmet_tcp_port *port = nport->priv;
1815
1816 write_lock_bh(&port->sock->sk->sk_callback_lock);
1817 port->sock->sk->sk_data_ready = port->data_ready;
1818 port->sock->sk->sk_user_data = NULL;
1819 write_unlock_bh(&port->sock->sk->sk_callback_lock);
1820 cancel_work_sync(&port->accept_work);
Israel Rukshin2351ead2021-10-06 08:09:45 +00001821 /*
 1822	 * Destroy the remaining queues, which do not belong to any
 1823	 * controller yet.
1824 */
1825 nvmet_tcp_destroy_port_queues(port);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001826
1827 sock_release(port->sock);
1828 kfree(port);
1829}
1830
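/*
 * Controller teardown: shut down the sockets of all queues owned by this
 * controller so their release work runs.
 */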
1831static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1832{
1833 struct nvmet_tcp_queue *queue;
1834
1835 mutex_lock(&nvmet_tcp_queue_mutex);
1836 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1837 if (queue->nvme_sq.ctrl == ctrl)
1838 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1839 mutex_unlock(&nvmet_tcp_queue_mutex);
1840}
1841
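/*
 * Called when the connect command establishes a queue.  For the admin queue,
 * wait for any in-flight controller teardown to finish first; then allocate
 * the full command array, sized to twice the submission queue depth (until
 * now only the single connect command existed).
 */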
1842static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1843{
1844 struct nvmet_tcp_queue *queue =
1845 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1846
1847 if (sq->qid == 0) {
1848 /* Let inflight controller teardown complete */
Sagi Grimberg8832cf92022-03-21 13:57:27 +02001849 flush_workqueue(nvmet_wq);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001850 }
1851
1852 queue->nr_cmds = sq->size * 2;
1853 if (nvmet_tcp_alloc_cmds(queue))
1854 return NVME_SC_INTERNAL;
1855 return 0;
1856}
1857
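/*
 * Report the port's transport address for discovery log pages.  For a
 * wildcard listener (0.0.0.0 / ::) the address the host actually connected
 * to is taken from the queue's local sockaddr instead of the configured
 * traddr.
 */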
1858static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1859 struct nvmet_port *nport, char *traddr)
1860{
1861 struct nvmet_tcp_port *port = nport->priv;
1862
1863 if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1864 struct nvmet_tcp_cmd *cmd =
1865 container_of(req, struct nvmet_tcp_cmd, req);
1866 struct nvmet_tcp_queue *queue = cmd->queue;
1867
1868 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1869 } else {
1870 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1871 }
1872}
1873
Max Gurtovoya40aae62020-06-01 20:05:20 +03001874static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001875 .owner = THIS_MODULE,
1876 .type = NVMF_TRTYPE_TCP,
1877 .msdbd = 1,
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001878 .add_port = nvmet_tcp_add_port,
1879 .remove_port = nvmet_tcp_remove_port,
1880 .queue_response = nvmet_tcp_queue_response,
1881 .delete_ctrl = nvmet_tcp_delete_ctrl,
1882 .install_queue = nvmet_tcp_install_queue,
1883 .disc_traddr = nvmet_tcp_disc_port_addr,
1884};
1885
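/*
 * Module init: create the WQ_HIGHPRI | WQ_MEM_RECLAIM I/O workqueue and
 * register the TCP transport with the nvmet core; the workqueue is
 * destroyed if registration fails.
 */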
1886static int __init nvmet_tcp_init(void)
1887{
1888 int ret;
1889
Sagi Grimberg533d2e82022-07-24 11:58:43 +03001890 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
1891 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001892 if (!nvmet_tcp_wq)
1893 return -ENOMEM;
1894
1895 ret = nvmet_register_transport(&nvmet_tcp_ops);
1896 if (ret)
1897 goto err;
1898
1899 return 0;
1900err:
1901 destroy_workqueue(nvmet_tcp_wq);
1902 return ret;
1903}
1904
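/*
 * Module exit: unregister the transport, flush nvmet_wq, shut down any
 * remaining queue sockets, then flush again so their release work finishes
 * before the TCP workqueue is destroyed.
 */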
1905static void __exit nvmet_tcp_exit(void)
1906{
1907 struct nvmet_tcp_queue *queue;
1908
1909 nvmet_unregister_transport(&nvmet_tcp_ops);
1910
Sagi Grimberg8832cf92022-03-21 13:57:27 +02001911 flush_workqueue(nvmet_wq);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001912 mutex_lock(&nvmet_tcp_queue_mutex);
1913 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1914 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1915 mutex_unlock(&nvmet_tcp_queue_mutex);
Sagi Grimberg8832cf92022-03-21 13:57:27 +02001916 flush_workqueue(nvmet_wq);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001917
1918 destroy_workqueue(nvmet_tcp_wq);
1919}
1920
1921module_init(nvmet_tcp_init);
1922module_exit(nvmet_tcp_exit);
1923
1924MODULE_LICENSE("GPL v2");
1925MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */