// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
#include <trace/events/sock.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA		0x400000 /* 4M arbitrary limit */

static int param_store_val(const char *str, int *val, int min, int max)
{
        int ret, new_val;

        ret = kstrtoint(str, 10, &new_val);
        if (ret)
                return -EINVAL;

        if (new_val < min || new_val > max)
                return -EINVAL;

        *val = new_val;
        return 0;
}

static int set_params(const char *str, const struct kernel_param *kp)
{
        return param_store_val(str, kp->arg, 0, INT_MAX);
}

static const struct kernel_param_ops set_param_ops = {
        .set = set_params,
        .get = param_get_int,
};

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimization priority: Default 0");

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
 */
static int idle_poll_period_usecs;
device_param_cb(idle_poll_period_usecs, &set_param_ops,
                &idle_poll_period_usecs, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
                "nvmet tcp io_work poll till idle time period in usecs: Default 0");

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
        NVMET_TCP_SEND_DATA_PDU,
        NVMET_TCP_SEND_DATA,
        NVMET_TCP_SEND_R2T,
        NVMET_TCP_SEND_DDGST,
        NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
        NVMET_TCP_RECV_PDU,
        NVMET_TCP_RECV_DATA,
        NVMET_TCP_RECV_DDGST,
        NVMET_TCP_RECV_ERR,
};

enum {
        NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
        struct nvmet_tcp_queue		*queue;
        struct nvmet_req		req;

        struct nvme_tcp_cmd_pdu		*cmd_pdu;
        struct nvme_tcp_rsp_pdu		*rsp_pdu;
        struct nvme_tcp_data_pdu	*data_pdu;
        struct nvme_tcp_r2t_pdu		*r2t_pdu;

        u32				rbytes_done;
        u32				wbytes_done;

        u32				pdu_len;
        u32				pdu_recv;
        int				sg_idx;
        struct msghdr			recv_msg;
        struct bio_vec			*iov;
        u32				flags;

        struct list_head		entry;
        struct llist_node		lentry;

        /* send state */
        u32				offset;
        struct scatterlist		*cur_sg;
        enum nvmet_tcp_send_state	state;

        __le32				exp_ddgst;
        __le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
        NVMET_TCP_Q_CONNECTING,
        NVMET_TCP_Q_LIVE,
        NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
        struct socket		*sock;
        struct nvmet_tcp_port	*port;
        struct work_struct	io_work;
        struct nvmet_cq		nvme_cq;
        struct nvmet_sq		nvme_sq;

        /* send state */
        struct nvmet_tcp_cmd	*cmds;
        unsigned int		nr_cmds;
        struct list_head	free_list;
        struct llist_head	resp_list;
        struct list_head	resp_send_list;
        int			send_list_len;
        struct nvmet_tcp_cmd	*snd_cmd;

        /* recv state */
        int			offset;
        int			left;
        enum nvmet_tcp_recv_state rcv_state;
        struct nvmet_tcp_cmd	*cmd;
        union nvme_tcp_pdu	pdu;

        /* digest state */
        bool			hdr_digest;
        bool			data_digest;
        struct ahash_request	*snd_hash;
        struct ahash_request	*rcv_hash;

        unsigned long		poll_end;

        spinlock_t		state_lock;
        enum nvmet_tcp_queue_state state;

        struct sockaddr_storage	sockaddr;
        struct sockaddr_storage	sockaddr_peer;
        struct work_struct	release_work;

        int			idx;
        struct list_head	queue_list;

        struct nvmet_tcp_cmd	connect;

        struct page_frag_cache	pf_cache;

        void (*data_ready)(struct sock *);
        void (*state_change)(struct sock *);
        void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
        struct socket		*sock;
        struct work_struct	accept_work;
        struct nvmet_port	*nport;
        struct sockaddr_storage	addr;
        void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *cmd)
{
        if (unlikely(!queue->nr_cmds)) {
                /* We didn't allocate cmds yet, send 0xffff */
                return USHRT_MAX;
        }

        return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
        return nvme_is_write(cmd->req.cmd) &&
                cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
        return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
        return !nvme_is_write(cmd->req.cmd) &&
                cmd->req.transfer_len > 0 &&
                !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
        return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
                !cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd;

        cmd = list_first_entry_or_null(&queue->free_list,
                                struct nvmet_tcp_cmd, entry);
        if (!cmd)
                return NULL;
        list_del_init(&cmd->entry);

        cmd->rbytes_done = cmd->wbytes_done = 0;
        cmd->pdu_len = 0;
        cmd->pdu_recv = 0;
        cmd->iov = NULL;
        cmd->flags = 0;
        return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
        if (unlikely(cmd == &cmd->queue->connect))
                return;

        list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
        return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
        return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
        return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

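/*
 * Compute the CRC32C header digest over @len bytes of the PDU and store the
 * 4-byte result immediately after the header, at @pdu + @len. The ahash
 * transform is allocated with the CRYPTO_ALG_ASYNC bit masked off, so
 * crypto_ahash_digest() completes synchronously here.
 */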
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
                void *pdu, size_t len)
{
        struct scatterlist sg;

        sg_init_one(&sg, pdu, len);
        ahash_request_set_crypt(hash, &sg, pdu + len, len);
        crypto_ahash_digest(hash);
}

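/*
 * Verify a received header digest: remember the digest that arrived on the
 * wire (it sits right after the header), recompute the digest in place over
 * the header bytes, and compare the two. A missing HDGST flag or a mismatch
 * is treated as a protocol error.
 */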
static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
                void *pdu, size_t len)
{
        struct nvme_tcp_hdr *hdr = pdu;
        __le32 recv_digest;
        __le32 exp_digest;

        if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
                pr_err("queue %d: header digest enabled but no header digest\n",
                        queue->idx);
                return -EPROTO;
        }

        recv_digest = *(__le32 *)(pdu + hdr->hlen);
        nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
        exp_digest = *(__le32 *)(pdu + hdr->hlen);
        if (recv_digest != exp_digest) {
                pr_err("queue %d: header digest error: recv %#x expected %#x\n",
                        queue->idx, le32_to_cpu(recv_digest),
                        le32_to_cpu(exp_digest));
                return -EPROTO;
        }

        return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
        struct nvme_tcp_hdr *hdr = pdu;
        u8 digest_len = nvmet_tcp_hdgst_len(queue);
        u32 len;

        len = le32_to_cpu(hdr->plen) - hdr->hlen -
                (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

        if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
                pr_err("queue %d: data digest flag is cleared\n", queue->idx);
                return -EPROTO;
        }

        return 0;
}

/* If cmd buffers are NULL, no operation is performed */
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
        kfree(cmd->iov);
        sgl_free(cmd->req.sg);
        cmd->iov = NULL;
        cmd->req.sg = NULL;
}

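/*
 * Build a bio_vec array that covers cmd->pdu_len bytes of the command's data
 * scatterlist, starting at the cmd->rbytes_done offset, and point the
 * command's receive msghdr at it so that socket receives land directly in
 * the destination pages.
 */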
static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
        struct bio_vec *iov = cmd->iov;
        struct scatterlist *sg;
        u32 length, offset, sg_offset;
        int nr_pages;

        length = cmd->pdu_len;
        nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
        offset = cmd->rbytes_done;
        cmd->sg_idx = offset / PAGE_SIZE;
        sg_offset = offset % PAGE_SIZE;
        sg = &cmd->req.sg[cmd->sg_idx];

        while (length) {
                u32 iov_len = min_t(u32, length, sg->length - sg_offset);

                bvec_set_page(iov, sg_page(sg), iov_len,
                                sg->offset + sg_offset);

                length -= iov_len;
                sg = sg_next(sg);
                iov++;
                sg_offset = 0;
        }

        iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
                nr_pages, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
        queue->rcv_state = NVMET_TCP_RECV_ERR;
        if (queue->nvme_sq.ctrl)
                nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
        else
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
        queue->rcv_state = NVMET_TCP_RECV_ERR;
        if (status == -EPIPE || status == -ECONNRESET)
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        else
                nvmet_tcp_fatal_error(queue);
}

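/*
 * Parse the command's SGL descriptor and allocate the scatterlist for the
 * transfer. An offset-addressed data block descriptor means the data is (or
 * will be) carried in-capsule, which is only valid for writes and must fit
 * within the port's advertised inline data size.
 */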
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
        struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
        u32 len = le32_to_cpu(sgl->length);

        if (!len)
                return 0;

        if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
                          NVME_SGL_FMT_OFFSET)) {
                if (!nvme_is_write(cmd->req.cmd))
                        return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

                if (len > cmd->req.port->inline_data_size)
                        return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
                cmd->pdu_len = len;
        }
        cmd->req.transfer_len += len;

        cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
        if (!cmd->req.sg)
                return NVME_SC_INTERNAL;
        cmd->cur_sg = cmd->req.sg;

        if (nvmet_tcp_has_data_in(cmd)) {
                cmd->iov = kmalloc_array(cmd->req.sg_cnt,
                                sizeof(*cmd->iov), GFP_KERNEL);
                if (!cmd->iov)
                        goto err;
        }

        return 0;
err:
        nvmet_tcp_free_cmd_buffers(cmd);
        return NVME_SC_INTERNAL;
}

static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
                struct nvmet_tcp_cmd *cmd)
{
        ahash_request_set_crypt(hash, cmd->req.sg,
                (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
        crypto_ahash_digest(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
        struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
        struct nvmet_tcp_queue *queue = cmd->queue;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
        u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

        cmd->offset = 0;
        cmd->state = NVMET_TCP_SEND_DATA_PDU;

        pdu->hdr.type = nvme_tcp_c2h_data;
        pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
                                                NVME_TCP_F_DATA_SUCCESS : 0);
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
        pdu->hdr.plen =
                cpu_to_le32(pdu->hdr.hlen + hdgst +
                                cmd->req.transfer_len + ddgst);
        pdu->command_id = cmd->req.cqe->command_id;
        pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
        pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

        if (queue->data_digest) {
                pdu->hdr.flags |= NVME_TCP_F_DDGST;
                nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
        }

        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
                nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
        }
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
        struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
        struct nvmet_tcp_queue *queue = cmd->queue;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

        cmd->offset = 0;
        cmd->state = NVMET_TCP_SEND_R2T;

        pdu->hdr.type = nvme_tcp_r2t;
        pdu->hdr.flags = 0;
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.pdo = 0;
        pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

        pdu->command_id = cmd->req.cmd->common.command_id;
        pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
        pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
        pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
                nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
        }
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
        struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
        struct nvmet_tcp_queue *queue = cmd->queue;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

        cmd->offset = 0;
        cmd->state = NVMET_TCP_SEND_RESPONSE;

        pdu->hdr.type = nvme_tcp_rsp;
        pdu->hdr.flags = 0;
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.pdo = 0;
        pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
                nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
        }
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
        struct llist_node *node;
        struct nvmet_tcp_cmd *cmd;

        for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
                cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
                list_add(&cmd->entry, &queue->resp_send_list);
                queue->send_list_len++;
        }
}

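/*
 * Completions are queued on a lockless llist from arbitrary context; the
 * io_work context drains them into the ordered resp_send_list here. Fetching
 * a command also prepares the first PDU it needs on the wire: C2H data for
 * reads, an R2T when more host data is required, or a plain response.
 */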
static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
        queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
                                struct nvmet_tcp_cmd, entry);
        if (!queue->snd_cmd) {
                nvmet_tcp_process_resp_list(queue);
                queue->snd_cmd =
                        list_first_entry_or_null(&queue->resp_send_list,
                                        struct nvmet_tcp_cmd, entry);
                if (unlikely(!queue->snd_cmd))
                        return NULL;
        }

        list_del_init(&queue->snd_cmd->entry);
        queue->send_list_len--;

        if (nvmet_tcp_need_data_out(queue->snd_cmd))
                nvmet_setup_c2h_data_pdu(queue->snd_cmd);
        else if (nvmet_tcp_need_data_in(queue->snd_cmd))
                nvmet_setup_r2t_pdu(queue->snd_cmd);
        else
                nvmet_setup_response_pdu(queue->snd_cmd);

        return queue->snd_cmd;
}

static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
        struct nvmet_tcp_cmd *cmd =
                container_of(req, struct nvmet_tcp_cmd, req);
        struct nvmet_tcp_queue *queue = cmd->queue;
        struct nvme_sgl_desc *sgl;
        u32 len;

        if (unlikely(cmd == queue->cmd)) {
                sgl = &cmd->req.cmd->common.dptr.sgl;
                len = le32_to_cpu(sgl->length);

                /*
                 * Wait for inline data before processing the response.
                 * Avoid using helpers, this might happen before
                 * nvmet_req_init is completed.
                 */
                if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
                    len && len <= cmd->req.port->inline_data_size &&
                    nvme_is_write(cmd->req.cmd))
                        return;
        }

        llist_add(&cmd->lentry, &queue->resp_list);
        queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
        if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
                nvmet_tcp_queue_response(&cmd->req);
        else
                cmd->req.execute(&cmd->req);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
        struct msghdr msg = {
                .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
        };
        struct bio_vec bvec;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
        int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
        int ret;

        bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
        ret = sock_sendmsg(cmd->queue->sock, &msg);
        if (ret <= 0)
                return ret;

        cmd->offset += ret;
        left -= ret;

        if (left)
                return -EAGAIN;

        cmd->state = NVMET_TCP_SEND_DATA;
        cmd->offset = 0;
        return 1;
}

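/*
 * Transmit the command's data directly from the scatterlist pages using
 * MSG_SPLICE_PAGES (zero copy). MSG_MORE stays set as long as more bytes of
 * this command, more commands in the batch, or a trailing digest/response
 * PDU will follow, letting the stack coalesce the segments.
 */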
static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
        struct nvmet_tcp_queue *queue = cmd->queue;
        int ret;

        while (cmd->cur_sg) {
                struct msghdr msg = {
                        .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
                };
                struct page *page = sg_page(cmd->cur_sg);
                struct bio_vec bvec;
                u32 left = cmd->cur_sg->length - cmd->offset;

                if ((!last_in_batch && cmd->queue->send_list_len) ||
                    cmd->wbytes_done + left < cmd->req.transfer_len ||
                    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
                        msg.msg_flags |= MSG_MORE;

                bvec_set_page(&bvec, page, left, cmd->offset);
                iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
                ret = sock_sendmsg(cmd->queue->sock, &msg);
                if (ret <= 0)
                        return ret;

                cmd->offset += ret;
                cmd->wbytes_done += ret;

                /* Done with sg? */
                if (cmd->offset == cmd->cur_sg->length) {
                        cmd->cur_sg = sg_next(cmd->cur_sg);
                        cmd->offset = 0;
                }
        }

        if (queue->data_digest) {
                cmd->state = NVMET_TCP_SEND_DDGST;
                cmd->offset = 0;
        } else {
                if (queue->nvme_sq.sqhd_disabled) {
                        cmd->queue->snd_cmd = NULL;
                        nvmet_tcp_put_cmd(cmd);
                } else {
                        nvmet_setup_response_pdu(cmd);
                }
        }

        if (queue->nvme_sq.sqhd_disabled)
                nvmet_tcp_free_cmd_buffers(cmd);

        return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
                bool last_in_batch)
{
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
        struct bio_vec bvec;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
        int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
        int ret;

        if (!last_in_batch && cmd->queue->send_list_len)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;

        bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
        ret = sock_sendmsg(cmd->queue->sock, &msg);
        if (ret <= 0)
                return ret;
        cmd->offset += ret;
        left -= ret;

        if (left)
                return -EAGAIN;

        nvmet_tcp_free_cmd_buffers(cmd);
        cmd->queue->snd_cmd = NULL;
        nvmet_tcp_put_cmd(cmd);
        return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
        struct bio_vec bvec;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
        int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
        int ret;

        if (!last_in_batch && cmd->queue->send_list_len)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;

        bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
        ret = sock_sendmsg(cmd->queue->sock, &msg);
        if (ret <= 0)
                return ret;
        cmd->offset += ret;
        left -= ret;

        if (left)
                return -EAGAIN;

        cmd->queue->snd_cmd = NULL;
        return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
        struct nvmet_tcp_queue *queue = cmd->queue;
        int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
        struct kvec iov = {
                .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
                .iov_len = left
        };
        int ret;

        if (!last_in_batch && cmd->queue->send_list_len)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;

        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
        if (unlikely(ret <= 0))
                return ret;

        cmd->offset += ret;
        left -= ret;

        if (left)
                return -EAGAIN;

        if (queue->nvme_sq.sqhd_disabled) {
                cmd->queue->snd_cmd = NULL;
                nvmet_tcp_put_cmd(cmd);
        } else {
                nvmet_setup_response_pdu(cmd);
        }
        return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
                bool last_in_batch)
{
        struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
        int ret = 0;

        if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
                cmd = nvmet_tcp_fetch_cmd(queue);
                if (unlikely(!cmd))
                        return 0;
        }

        if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
                ret = nvmet_try_send_data_pdu(cmd);
                if (ret <= 0)
                        goto done_send;
        }

        if (cmd->state == NVMET_TCP_SEND_DATA) {
                ret = nvmet_try_send_data(cmd, last_in_batch);
                if (ret <= 0)
                        goto done_send;
        }

        if (cmd->state == NVMET_TCP_SEND_DDGST) {
                ret = nvmet_try_send_ddgst(cmd, last_in_batch);
                if (ret <= 0)
                        goto done_send;
        }

        if (cmd->state == NVMET_TCP_SEND_R2T) {
                ret = nvmet_try_send_r2t(cmd, last_in_batch);
                if (ret <= 0)
                        goto done_send;
        }

        if (cmd->state == NVMET_TCP_SEND_RESPONSE)
                ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
        if (ret < 0) {
                if (ret == -EAGAIN)
                        return 0;
                return ret;
        }

        return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
                int budget, int *sends)
{
        int i, ret = 0;

        for (i = 0; i < budget; i++) {
                ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
                if (unlikely(ret < 0)) {
                        nvmet_tcp_socket_error(queue, ret);
                        goto done;
                } else if (ret == 0) {
                        break;
                }
                (*sends)++;
        }
done:
        return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
        queue->offset = 0;
        queue->left = sizeof(struct nvme_tcp_hdr);
        queue->cmd = NULL;
        queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

        ahash_request_free(queue->rcv_hash);
        ahash_request_free(queue->snd_hash);
        crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
        struct crypto_ahash *tfm;

        tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!queue->snd_hash)
                goto free_tfm;
        ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

        queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!queue->rcv_hash)
                goto free_snd_hash;
        ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

        return 0;
free_snd_hash:
        ahash_request_free(queue->snd_hash);
free_tfm:
        crypto_free_ahash(tfm);
        return -ENOMEM;
}

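/*
 * Handle the Initialize Connection Request (ICReq) PDU that starts every
 * NVMe/TCP connection: validate its length, PFV and alignment, negotiate
 * header/data digests (allocating the CRC32C transforms if either digest is
 * enabled), and answer with an ICResp advertising our MAXH2CDATA limit
 * before moving the queue to the live state.
 */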
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
        struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
        struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
        struct msghdr msg = {};
        struct kvec iov;
        int ret;

        if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
                pr_err("bad nvme-tcp pdu length (%d)\n",
                        le32_to_cpu(icreq->hdr.plen));
                nvmet_tcp_fatal_error(queue);
                return -EPROTO;
        }

        if (icreq->pfv != NVME_TCP_PFV_1_0) {
                pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
                return -EPROTO;
        }

        if (icreq->hpda != 0) {
                pr_err("queue %d: unsupported hpda %d\n", queue->idx,
                        icreq->hpda);
                return -EPROTO;
        }

        queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
        queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
        if (queue->hdr_digest || queue->data_digest) {
                ret = nvmet_tcp_alloc_crypto(queue);
                if (ret)
                        return ret;
        }

        memset(icresp, 0, sizeof(*icresp));
        icresp->hdr.type = nvme_tcp_icresp;
        icresp->hdr.hlen = sizeof(*icresp);
        icresp->hdr.pdo = 0;
        icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
        icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
        icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
        icresp->cpda = 0;
        if (queue->hdr_digest)
                icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
        if (queue->data_digest)
                icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

        iov.iov_base = icresp;
        iov.iov_len = sizeof(*icresp);
        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
        if (ret < 0)
                return ret; /* queue removal will cleanup */

        queue->state = NVMET_TCP_Q_LIVE;
        nvmet_prepare_receive_pdu(queue);
        return 0;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
        size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
        int ret;

        /*
         * This command has not been processed yet, hence we are trying to
         * figure out if there is still pending inline data left to receive.
         * If there is none, we can simply prepare for the next pdu and bail
         * out, otherwise we will need to prepare a buffer and receive the
         * stale data before continuing forward.
         */
        if (!nvme_is_write(cmd->req.cmd) || !data_len ||
            data_len > cmd->req.port->inline_data_size) {
                nvmet_prepare_receive_pdu(queue);
                return;
        }

        ret = nvmet_tcp_map_data(cmd);
        if (unlikely(ret)) {
                pr_err("queue %d: failed to map data\n", queue->idx);
                nvmet_tcp_fatal_error(queue);
                return;
        }

        queue->rcv_state = NVMET_TCP_RECV_DATA;
        nvmet_tcp_build_pdu_iovec(cmd);
        cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

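/*
 * Handle an H2CData PDU. The transfer tag (ttag) indexes the command slot
 * that the preceding R2T advertised, so it is bounds-checked against
 * nr_cmds; the data offset must match exactly what has been received so
 * far, and the PDU data length is cross-checked against the length implied
 * by plen and capped at NVMET_TCP_MAXH2CDATA.
 */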
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
        struct nvme_tcp_data_pdu *data = &queue->pdu.data;
        struct nvmet_tcp_cmd *cmd;
        unsigned int exp_data_len;

        if (likely(queue->nr_cmds)) {
                if (unlikely(data->ttag >= queue->nr_cmds)) {
                        pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
                                queue->idx, data->ttag, queue->nr_cmds);
                        nvmet_tcp_fatal_error(queue);
                        return -EPROTO;
                }
                cmd = &queue->cmds[data->ttag];
        } else {
                cmd = &queue->connect;
        }

        if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
                pr_err("ttag %u unexpected data offset %u (expected %u)\n",
                        data->ttag, le32_to_cpu(data->data_offset),
                        cmd->rbytes_done);
                /* FIXME: use path and transport errors */
                nvmet_tcp_fatal_error(queue);
                return -EPROTO;
        }

        exp_data_len = le32_to_cpu(data->hdr.plen) -
                        nvmet_tcp_hdgst_len(queue) -
                        nvmet_tcp_ddgst_len(queue) -
                        sizeof(*data);

        cmd->pdu_len = le32_to_cpu(data->data_length);
        if (unlikely(cmd->pdu_len != exp_data_len ||
                     cmd->pdu_len == 0 ||
                     cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
                pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
                /* FIXME: use proper transport errors */
                nvmet_tcp_fatal_error(queue);
                return -EPROTO;
        }
        cmd->pdu_recv = 0;
        nvmet_tcp_build_pdu_iovec(cmd);
        queue->cmd = cmd;
        queue->rcv_state = NVMET_TCP_RECV_DATA;

        return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
        struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
        struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
        struct nvmet_req *req;
        int ret;

        if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
                if (hdr->type != nvme_tcp_icreq) {
                        pr_err("unexpected pdu type (%d) before icreq\n",
                                hdr->type);
                        nvmet_tcp_fatal_error(queue);
                        return -EPROTO;
                }
                return nvmet_tcp_handle_icreq(queue);
        }

        if (unlikely(hdr->type == nvme_tcp_icreq)) {
                pr_err("queue %d: received icreq pdu in state %d\n",
                        queue->idx, queue->state);
                nvmet_tcp_fatal_error(queue);
                return -EPROTO;
        }

        if (hdr->type == nvme_tcp_h2c_data) {
                ret = nvmet_tcp_handle_h2c_data_pdu(queue);
                if (unlikely(ret))
                        return ret;
                return 0;
        }

        queue->cmd = nvmet_tcp_get_cmd(queue);
        if (unlikely(!queue->cmd)) {
                /* This should never happen */
                pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d\n",
                        queue->idx, queue->nr_cmds, queue->send_list_len,
                        nvme_cmd->common.opcode);
                nvmet_tcp_fatal_error(queue);
                return -ENOMEM;
        }

        req = &queue->cmd->req;
        memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

        if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvmet_tcp_ops))) {
                pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
                        req->cmd, req->cmd->common.command_id,
                        req->cmd->common.opcode,
                        le32_to_cpu(req->cmd->common.dptr.sgl.length));

                nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
                return 0;
        }

        ret = nvmet_tcp_map_data(queue->cmd);
        if (unlikely(ret)) {
                pr_err("queue %d: failed to map data\n", queue->idx);
                if (nvmet_tcp_has_inline_data(queue->cmd))
                        nvmet_tcp_fatal_error(queue);
                else
                        nvmet_req_complete(req, ret);
                ret = -EAGAIN;
                goto out;
        }

        if (nvmet_tcp_need_data_in(queue->cmd)) {
                if (nvmet_tcp_has_inline_data(queue->cmd)) {
                        queue->rcv_state = NVMET_TCP_RECV_DATA;
                        nvmet_tcp_build_pdu_iovec(queue->cmd);
                        return 0;
                }
                /* send back R2T */
                nvmet_tcp_queue_response(&queue->cmd->req);
                goto out;
        }

        queue->cmd->req.execute(&queue->cmd->req);
out:
        nvmet_prepare_receive_pdu(queue);
        return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
        [nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
        [nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
        [nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
        size_t idx = type;

        return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
                nvme_tcp_pdu_sizes[idx]) ?
                        nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
        switch (type) {
        case nvme_tcp_icreq:
        case nvme_tcp_cmd:
        case nvme_tcp_h2c_data:
                return true;
        }

        return false;
}

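/*
 * Receive a PDU in two steps: first the common 8-byte nvme_tcp_hdr, which
 * carries the type and the full header length, then the remainder of the
 * header plus the header digest, if one was negotiated. Only once the
 * complete header is in do we verify the digests and dispatch on the type.
 */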
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
        struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
        int len;
        struct kvec iov;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
        iov.iov_base = (void *)&queue->pdu + queue->offset;
        iov.iov_len = queue->left;
        len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
                        iov.iov_len, msg.msg_flags);
        if (unlikely(len < 0))
                return len;

        queue->offset += len;
        queue->left -= len;
        if (queue->left)
                return -EAGAIN;

        if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
                u8 hdgst = nvmet_tcp_hdgst_len(queue);

                if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
                        pr_err("unexpected pdu type %d\n", hdr->type);
                        nvmet_tcp_fatal_error(queue);
                        return -EIO;
                }

                if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
                        pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
                        return -EIO;
                }

                queue->left = hdr->hlen - queue->offset + hdgst;
                goto recv;
        }

        if (queue->hdr_digest &&
            nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
                nvmet_tcp_fatal_error(queue); /* fatal */
                return -EPROTO;
        }

        if (queue->data_digest &&
            nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
                nvmet_tcp_fatal_error(queue); /* fatal */
                return -EPROTO;
        }

        return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
        struct nvmet_tcp_queue *queue = cmd->queue;

        nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
        queue->offset = 0;
        queue->left = NVME_TCP_DIGEST_LENGTH;
        queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd = queue->cmd;
        int ret;

        while (msg_data_left(&cmd->recv_msg)) {
                ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
                        cmd->recv_msg.msg_flags);
                if (ret <= 0)
                        return ret;

                cmd->pdu_recv += ret;
                cmd->rbytes_done += ret;
        }

        if (queue->data_digest) {
                nvmet_tcp_prep_recv_ddgst(cmd);
                return 0;
        }

        if (cmd->rbytes_done == cmd->req.transfer_len)
                nvmet_tcp_execute_request(cmd);

        nvmet_prepare_receive_pdu(queue);
        return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd = queue->cmd;
        int ret;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
        struct kvec iov = {
                .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
                .iov_len = queue->left
        };

        ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
                        iov.iov_len, msg.msg_flags);
        if (unlikely(ret < 0))
                return ret;

        queue->offset += ret;
        queue->left -= ret;
        if (queue->left)
                return -EAGAIN;

        if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
                pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
                        queue->idx, cmd->req.cmd->common.command_id,
                        queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
                        le32_to_cpu(cmd->exp_ddgst));
                nvmet_req_uninit(&cmd->req);
                nvmet_tcp_free_cmd_buffers(cmd);
                nvmet_tcp_fatal_error(queue);
                ret = -EPROTO;
                goto out;
        }

        if (cmd->rbytes_done == cmd->req.transfer_len)
                nvmet_tcp_execute_request(cmd);

        ret = 0;
out:
        nvmet_prepare_receive_pdu(queue);
        return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
        int result = 0;

        if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
                return 0;

        if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
                result = nvmet_tcp_try_recv_pdu(queue);
                if (result != 0)
                        goto done_recv;
        }

        if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
                result = nvmet_tcp_try_recv_data(queue);
                if (result != 0)
                        goto done_recv;
        }

        if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
                result = nvmet_tcp_try_recv_ddgst(queue);
                if (result != 0)
                        goto done_recv;
        }

done_recv:
        if (result < 0) {
                if (result == -EAGAIN)
                        return 0;
                return result;
        }
        return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
                int budget, int *recvs)
{
        int i, ret = 0;

        for (i = 0; i < budget; i++) {
                ret = nvmet_tcp_try_recv_one(queue);
                if (unlikely(ret < 0)) {
                        nvmet_tcp_socket_error(queue, ret);
                        goto done;
                } else if (ret == 0) {
                        break;
                }
                (*recvs)++;
        }
done:
        return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
        spin_lock(&queue->state_lock);
        if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
                queue->state = NVMET_TCP_Q_DISCONNECTING;
                queue_work(nvmet_wq, &queue->release_work);
        }
        spin_unlock(&queue->state_lock);
}

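/*
 * Optional idle polling: when idle_poll_period_usecs is set, io_work keeps
 * rescheduling itself until the queue has seen no recv/send activity for a
 * full deadline period. The deadline is re-armed on every pass that did
 * some work, trading CPU for latency on NICs tuned for interrupt moderation.
 */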
static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{
        queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
}

static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
                int ops)
{
        if (!idle_poll_period_usecs)
                return false;

        if (ops)
                nvmet_tcp_arm_queue_deadline(queue);

        return !time_after(jiffies, queue->poll_end);
}

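/*
 * Main per-queue worker: alternate bounded receive and send passes until
 * neither makes progress or the overall work budget is exhausted, then
 * requeue if there is still activity (or an idle-poll deadline is pending).
 * The budget keeps one busy queue from monopolizing the workqueue CPU.
 */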
static void nvmet_tcp_io_work(struct work_struct *w)
{
        struct nvmet_tcp_queue *queue =
                container_of(w, struct nvmet_tcp_queue, io_work);
        bool pending;
        int ret, ops = 0;

        do {
                pending = false;

                ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
                if (ret > 0)
                        pending = true;
                else if (ret < 0)
                        return;

                ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
                if (ret > 0)
                        pending = true;
                else if (ret < 0)
                        return;

        } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

        /*
         * Requeue the worker if idle deadline period is in progress or any
         * ops activity was recorded during the do-while loop above.
         */
        if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
                queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}

static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *c)
{
        u8 hdgst = nvmet_tcp_hdgst_len(queue);

        c->queue = queue;
        c->req.port = queue->port->nport;

        c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
        if (!c->cmd_pdu)
                return -ENOMEM;
        c->req.cmd = &c->cmd_pdu->cmd;

        c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
        if (!c->rsp_pdu)
                goto out_free_cmd;
        c->req.cqe = &c->rsp_pdu->cqe;

        c->data_pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
        if (!c->data_pdu)
                goto out_free_rsp;

        c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
        if (!c->r2t_pdu)
                goto out_free_data;

        c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

        list_add_tail(&c->entry, &queue->free_list);

        return 0;
out_free_data:
        page_frag_free(c->data_pdu);
out_free_rsp:
        page_frag_free(c->rsp_pdu);
out_free_cmd:
        page_frag_free(c->cmd_pdu);
        return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
        page_frag_free(c->r2t_pdu);
        page_frag_free(c->data_pdu);
        page_frag_free(c->rsp_pdu);
        page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmds;
        int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

        cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
        if (!cmds)
                goto out;

        for (i = 0; i < nr_cmds; i++) {
                ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
                if (ret)
                        goto out_free;
        }

        queue->cmds = cmds;

        return 0;
out_free:
        while (--i >= 0)
                nvmet_tcp_free_cmd(cmds + i);
        kfree(cmds);
out:
        return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmds = queue->cmds;
        int i;

        for (i = 0; i < queue->nr_cmds; i++)
                nvmet_tcp_free_cmd(cmds + i);

        nvmet_tcp_free_cmd(&queue->connect);
        kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
        struct socket *sock = queue->sock;

        write_lock_bh(&sock->sk->sk_callback_lock);
        sock->sk->sk_data_ready = queue->data_ready;
        sock->sk->sk_state_change = queue->state_change;
        sock->sk->sk_write_space = queue->write_space;
        sock->sk->sk_user_data = NULL;
        write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd = queue->cmds;
        int i;

        for (i = 0; i < queue->nr_cmds; i++, cmd++) {
                if (nvmet_tcp_need_data_in(cmd))
                        nvmet_req_uninit(&cmd->req);
        }

        if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
                /* failed in connect */
                nvmet_req_uninit(&queue->connect.req);
        }
}

static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd = queue->cmds;
        int i;

        for (i = 0; i < queue->nr_cmds; i++, cmd++)
                nvmet_tcp_free_cmd_buffers(cmd);
        nvmet_tcp_free_cmd_buffers(&queue->connect);
}

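/*
 * Queue teardown, run from nvmet_wq. The ordering matters: socket callbacks
 * are restored first so the network stack stops scheduling io_work, commands
 * still waiting for host data are uninitialized before the SQ is destroyed,
 * and io_work is cancelled a second time because nvmet_sq_destroy() can
 * requeue it while flushing out in-flight completions.
 */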
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
        struct page *page;
        struct nvmet_tcp_queue *queue =
                container_of(w, struct nvmet_tcp_queue, release_work);

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_del_init(&queue->queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);

        nvmet_tcp_restore_socket_callbacks(queue);
        cancel_work_sync(&queue->io_work);
        /* stop accepting incoming data */
        queue->rcv_state = NVMET_TCP_RECV_ERR;

        nvmet_tcp_uninit_data_in_cmds(queue);
        nvmet_sq_destroy(&queue->nvme_sq);
        cancel_work_sync(&queue->io_work);
        nvmet_tcp_free_cmd_data_in_buffers(queue);
        sock_release(queue->sock);
        nvmet_tcp_free_cmds(queue);
        if (queue->hdr_digest || queue->data_digest)
                nvmet_tcp_free_crypto(queue);
        ida_free(&nvmet_tcp_queue_ida, queue->idx);

        page = virt_to_head_page(queue->pf_cache.va);
        __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
        kfree(queue);
}

static void nvmet_tcp_data_ready(struct sock *sk)
{
        struct nvmet_tcp_queue *queue;

        trace_sk_data_ready(sk);

        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (likely(queue))
                queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
        read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
        struct nvmet_tcp_queue *queue;

        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (unlikely(!queue))
                goto out;

        if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
                queue->write_space(sk);
                goto out;
        }

        if (sk_stream_is_writeable(sk)) {
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
        }
out:
        read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
        struct nvmet_tcp_queue *queue;

        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (!queue)
                goto done;

        switch (sk->sk_state) {
        case TCP_FIN_WAIT2:
        case TCP_LAST_ACK:
                break;
        case TCP_FIN_WAIT1:
        case TCP_CLOSE_WAIT:
        case TCP_CLOSE:
                nvmet_tcp_schedule_release_queue(queue);
                break;
        default:
                pr_warn("queue %d unhandled state %d\n",
                        queue->idx, sk->sk_state);
        }
done:
        read_unlock_bh(&sk->sk_callback_lock);
}

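/*
 * Take ownership of an accepted socket: disable lingering so anything left
 * in the transmit queue is dropped on close, apply the configured priority
 * and TOS, then swap in our data_ready/state_change/write_space callbacks
 * under sk_callback_lock, saving the originals for restoration at release
 * time.
 */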
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
        struct socket *sock = queue->sock;
        struct inet_sock *inet = inet_sk(sock->sk);
        int ret;

        ret = kernel_getsockname(sock,
                (struct sockaddr *)&queue->sockaddr);
        if (ret < 0)
                return ret;

        ret = kernel_getpeername(sock,
                (struct sockaddr *)&queue->sockaddr_peer);
        if (ret < 0)
                return ret;

        /*
         * Cleanup whatever is sitting in the TCP transmit queue on socket
         * close. This is done to prevent stale data from being sent should
         * the network connection be restored before TCP times out.
         */
        sock_no_linger(sock->sk);

        if (so_priority > 0)
                sock_set_priority(sock->sk, so_priority);

        /* Set socket type of service */
        if (inet->rcv_tos > 0)
                ip_sock_set_tos(sock->sk, inet->rcv_tos);

        ret = 0;
        write_lock_bh(&sock->sk->sk_callback_lock);
        if (sock->sk->sk_state != TCP_ESTABLISHED) {
                /*
                 * If the socket is already closing, don't even start
                 * consuming it
                 */
                ret = -ENOTCONN;
        } else {
                sock->sk->sk_user_data = queue;
                queue->data_ready = sock->sk->sk_data_ready;
                sock->sk->sk_data_ready = nvmet_tcp_data_ready;
                queue->state_change = sock->sk->sk_state_change;
                sock->sk->sk_state_change = nvmet_tcp_state_change;
                queue->write_space = sock->sk->sk_write_space;
                sock->sk->sk_write_space = nvmet_tcp_write_space;
                if (idle_poll_period_usecs)
                        nvmet_tcp_arm_queue_deadline(queue);
                queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
        }
        write_unlock_bh(&sock->sk->sk_callback_lock);

        return ret;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
                struct socket *newsock)
{
        struct nvmet_tcp_queue *queue;
        int ret;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return -ENOMEM;

        INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
        INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
        queue->sock = newsock;
        queue->port = port;
        queue->nr_cmds = 0;
        spin_lock_init(&queue->state_lock);
        queue->state = NVMET_TCP_Q_CONNECTING;
        INIT_LIST_HEAD(&queue->free_list);
        init_llist_head(&queue->resp_list);
        INIT_LIST_HEAD(&queue->resp_send_list);

        queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
        if (queue->idx < 0) {
                ret = queue->idx;
                goto out_free_queue;
        }

        ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
        if (ret)
                goto out_ida_remove;

        ret = nvmet_sq_init(&queue->nvme_sq);
        if (ret)
                goto out_free_connect;

        nvmet_prepare_receive_pdu(queue);

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);

        ret = nvmet_tcp_set_queue_sock(queue);
        if (ret)
                goto out_destroy_sq;

        return 0;
out_destroy_sq:
        mutex_lock(&nvmet_tcp_queue_mutex);
        list_del_init(&queue->queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);
        nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
        nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
        ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
        kfree(queue);
        return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
        struct nvmet_tcp_port *port =
                container_of(w, struct nvmet_tcp_port, accept_work);
        struct socket *newsock;
        int ret;

        while (true) {
                ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
                if (ret < 0) {
                        if (ret != -EAGAIN)
                                pr_warn("failed to accept err=%d\n", ret);
                        return;
                }
                ret = nvmet_tcp_alloc_queue(port, newsock);
                if (ret) {
                        pr_err("failed to allocate queue\n");
                        sock_release(newsock);
                }
        }
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
        struct nvmet_tcp_port *port;

        trace_sk_data_ready(sk);

        read_lock_bh(&sk->sk_callback_lock);
        port = sk->sk_user_data;
        if (!port)
                goto out;

        if (sk->sk_state == TCP_LISTEN)
                queue_work(nvmet_wq, &port->accept_work);
out:
        read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
        struct nvmet_tcp_port *port;
        __kernel_sa_family_t af;
        int ret;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        switch (nport->disc_addr.adrfam) {
        case NVMF_ADDR_FAMILY_IP4:
                af = AF_INET;
                break;
        case NVMF_ADDR_FAMILY_IP6:
                af = AF_INET6;
                break;
        default:
                pr_err("address family %d not supported\n",
                        nport->disc_addr.adrfam);
                ret = -EINVAL;
                goto err_port;
        }

        ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
                        nport->disc_addr.trsvcid, &port->addr);
        if (ret) {
                pr_err("malformed ip/port passed: %s:%s\n",
                        nport->disc_addr.traddr, nport->disc_addr.trsvcid);
                goto err_port;
        }

        port->nport = nport;
        INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
        if (port->nport->inline_data_size < 0)
                port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

        ret = sock_create(port->addr.ss_family, SOCK_STREAM,
                        IPPROTO_TCP, &port->sock);
        if (ret) {
                pr_err("failed to create a socket\n");
                goto err_port;
        }

        port->sock->sk->sk_user_data = port;
        port->data_ready = port->sock->sk->sk_data_ready;
        port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
        sock_set_reuseaddr(port->sock->sk);
        tcp_sock_set_nodelay(port->sock->sk);
        if (so_priority > 0)
                sock_set_priority(port->sock->sk, so_priority);

        ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
                        sizeof(port->addr));
        if (ret) {
                pr_err("failed to bind port socket %d\n", ret);
                goto err_sock;
        }

        ret = kernel_listen(port->sock, 128);
        if (ret) {
                pr_err("failed to listen %d on port sock\n", ret);
                goto err_sock;
        }

        nport->priv = port;
        pr_info("enabling port %d (%pISpc)\n",
                le16_to_cpu(nport->disc_addr.portid), &port->addr);

        return 0;

err_sock:
        sock_release(port->sock);
err_port:
        kfree(port);
        return ret;
}

static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
        struct nvmet_tcp_queue *queue;

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                if (queue->port == port)
                        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
        struct nvmet_tcp_port *port = nport->priv;

        write_lock_bh(&port->sock->sk->sk_callback_lock);
        port->sock->sk->sk_data_ready = port->data_ready;
        port->sock->sk->sk_user_data = NULL;
        write_unlock_bh(&port->sock->sk->sk_callback_lock);
        cancel_work_sync(&port->accept_work);
        /*
         * Destroy the remaining queues, which do not belong to any
         * controller yet.
         */
        nvmet_tcp_destroy_port_queues(port);

        sock_release(port->sock);
        kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
        struct nvmet_tcp_queue *queue;

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                if (queue->nvme_sq.ctrl == ctrl)
                        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
}

static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
        struct nvmet_tcp_queue *queue =
                container_of(sq, struct nvmet_tcp_queue, nvme_sq);

        if (sq->qid == 0) {
                /* Let inflight controller teardown complete */
                flush_workqueue(nvmet_wq);
        }

        queue->nr_cmds = sq->size * 2;
        if (nvmet_tcp_alloc_cmds(queue))
                return NVME_SC_INTERNAL;
        return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
                struct nvmet_port *nport, char *traddr)
{
        struct nvmet_tcp_port *port = nport->priv;

        if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
                struct nvmet_tcp_cmd *cmd =
                        container_of(req, struct nvmet_tcp_cmd, req);
                struct nvmet_tcp_queue *queue = cmd->queue;

                sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
        } else {
                memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
        }
}

static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
        .owner			= THIS_MODULE,
        .type			= NVMF_TRTYPE_TCP,
        .msdbd			= 1,
        .add_port		= nvmet_tcp_add_port,
        .remove_port		= nvmet_tcp_remove_port,
        .queue_response		= nvmet_tcp_queue_response,
        .delete_ctrl		= nvmet_tcp_delete_ctrl,
        .install_queue		= nvmet_tcp_install_queue,
        .disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
        int ret;

        nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
                                WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!nvmet_tcp_wq)
                return -ENOMEM;

        ret = nvmet_register_transport(&nvmet_tcp_ops);
        if (ret)
                goto err;

        return 0;
err:
        destroy_workqueue(nvmet_tcp_wq);
        return ret;
}

static void __exit nvmet_tcp_exit(void)
{
        struct nvmet_tcp_queue *queue;

        nvmet_unregister_transport(&nvmet_tcp_ops);

        flush_workqueue(nvmet_wq);
        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
        flush_workqueue(nvmet_wq);

        destroy_workqueue(nvmet_tcp_wq);
        ida_destroy(&nvmet_tcp_queue_ida);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */