1 /*******************************************************************************
2  * This file contains iSCSI extensions for RDMA (iSER) Verbs
3  *
4  * (c) Copyright 2013 Datera, Inc.
5  *
6  * Nicholas A. Bellinger <nab@linux-iscsi.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  ****************************************************************************/
18 
19 #include <linux/string.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/socket.h>
23 #include <linux/in.h>
24 #include <linux/in6.h>
25 #include <linux/llist.h>
26 #include <rdma/ib_verbs.h>
27 #include <rdma/rdma_cm.h>
28 #include <target/target_core_base.h>
29 #include <target/target_core_fabric.h>
30 #include <target/iscsi/iscsi_transport.h>
31 #include <linux/semaphore.h>
32 
33 #include "isert_proto.h"
34 #include "ib_isert.h"
35 
36 #define	ISERT_MAX_CONN		8
37 #define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
38 #define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
39 
40 static DEFINE_MUTEX(device_list_mutex);
41 static LIST_HEAD(device_list);
42 static struct workqueue_struct *isert_rx_wq;
43 static struct workqueue_struct *isert_comp_wq;
44 
45 static void
46 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
47 static int
48 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
49 	       struct isert_rdma_wr *wr);
50 static void
51 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
52 static int
53 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
54 	       struct isert_rdma_wr *wr);
55 static int
56 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
57 
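/*
 * QP asynchronous event handler: forward IB_EVENT_COMM_EST to the RDMA CM
 * via rdma_notify() so connection establishment can complete; every other
 * event is only logged.
 */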
58 static void
59 isert_qp_event_callback(struct ib_event *e, void *context)
60 {
61 	struct isert_conn *isert_conn = (struct isert_conn *)context;
62 
63 	pr_err("isert_qp_event_callback event: %d\n", e->event);
64 	switch (e->event) {
65 	case IB_EVENT_COMM_EST:
66 		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
67 		break;
68 	case IB_EVENT_QP_LAST_WQE_REACHED:
69 		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
70 		break;
71 	default:
72 		break;
73 	}
74 }
75 
76 static int
77 isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
78 {
79 	int ret;
80 
81 	ret = ib_query_device(ib_dev, devattr);
82 	if (ret) {
83 		pr_err("ib_query_device() failed: %d\n", ret);
84 		return ret;
85 	}
86 	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
87 	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
88 
89 	return 0;
90 }
91 
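/*
 * Create the RC queue pair for a new connection: pick the CQ pair with the
 * fewest active QPs, cap max_send_sge from the device attributes, and
 * enable signature offload when protection is requested.
 */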
92 static int
93 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
94 		    u8 protection)
95 {
96 	struct isert_device *device = isert_conn->conn_device;
97 	struct ib_qp_init_attr attr;
98 	int ret, index, min_index = 0;
99 
100 	mutex_lock(&device_list_mutex);
101 	for (index = 0; index < device->cqs_used; index++)
102 		if (device->cq_active_qps[index] <
103 		    device->cq_active_qps[min_index])
104 			min_index = index;
105 	device->cq_active_qps[min_index]++;
106 	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
107 	mutex_unlock(&device_list_mutex);
108 
109 	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
110 	attr.event_handler = isert_qp_event_callback;
111 	attr.qp_context = isert_conn;
112 	attr.send_cq = device->dev_tx_cq[min_index];
113 	attr.recv_cq = device->dev_rx_cq[min_index];
114 	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
115 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
116 	/*
117 	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
118 	 * work-around for RDMA_READ..
119 	 */
120 	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
121 	isert_conn->max_sge = attr.cap.max_send_sge;
122 
123 	attr.cap.max_recv_sge = 1;
124 	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
125 	attr.qp_type = IB_QPT_RC;
126 	if (protection)
127 		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
128 
129 	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
130 		 cma_id->device);
131 	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
132 		 isert_conn->conn_pd->device);
133 
134 	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
135 	if (ret) {
136 		pr_err("rdma_create_qp() failed: %d\n", ret);
137 		return ret;
138 	}
139 	isert_conn->conn_qp = cma_id->qp;
140 	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
141 
142 	return 0;
143 }
144 
145 static void
146 isert_cq_event_callback(struct ib_event *e, void *context)
147 {
148 	pr_debug("isert_cq_event_callback event: %d\n", e->event);
149 }
150 
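/*
 * Allocate and DMA-map the ring of ISERT_QP_MAX_RECV_DTOS receive
 * descriptors; each descriptor's single SGE references its own buffer via
 * the connection's DMA MR lkey.  Mappings are unwound on failure.
 */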
151 static int
152 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
153 {
154 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
155 	struct iser_rx_desc *rx_desc;
156 	struct ib_sge *rx_sg;
157 	u64 dma_addr;
158 	int i, j;
159 
160 	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
161 				sizeof(struct iser_rx_desc), GFP_KERNEL);
162 	if (!isert_conn->conn_rx_descs)
163 		goto fail;
164 
165 	rx_desc = isert_conn->conn_rx_descs;
166 
167 	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
168 		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
169 					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
170 		if (ib_dma_mapping_error(ib_dev, dma_addr))
171 			goto dma_map_fail;
172 
173 		rx_desc->dma_addr = dma_addr;
174 
175 		rx_sg = &rx_desc->rx_sg;
176 		rx_sg->addr = rx_desc->dma_addr;
177 		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
178 		rx_sg->lkey = isert_conn->conn_mr->lkey;
179 	}
180 
181 	isert_conn->conn_rx_desc_head = 0;
182 	return 0;
183 
184 dma_map_fail:
185 	rx_desc = isert_conn->conn_rx_descs;
186 	for (j = 0; j < i; j++, rx_desc++) {
187 		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
188 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
189 	}
190 	kfree(isert_conn->conn_rx_descs);
191 	isert_conn->conn_rx_descs = NULL;
192 fail:
193 	return -ENOMEM;
194 }
195 
196 static void
197 isert_free_rx_descriptors(struct isert_conn *isert_conn)
198 {
199 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
200 	struct iser_rx_desc *rx_desc;
201 	int i;
202 
203 	if (!isert_conn->conn_rx_descs)
204 		return;
205 
206 	rx_desc = isert_conn->conn_rx_descs;
207 	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
208 		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
209 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
210 	}
211 
212 	kfree(isert_conn->conn_rx_descs);
213 	isert_conn->conn_rx_descs = NULL;
214 }
215 
216 static void isert_cq_tx_work(struct work_struct *);
217 static void isert_cq_tx_callback(struct ib_cq *, void *);
218 static void isert_cq_rx_work(struct work_struct *);
219 static void isert_cq_rx_callback(struct ib_cq *, void *);
220 
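/*
 * Per-device setup: query device attributes, select the fast-registration
 * RDMA path only when both memory management extensions and signature
 * handover are advertised, and create one RX/TX completion queue pair
 * (each with its own work item) per completion vector in use.
 */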
221 static int
222 isert_create_device_ib_res(struct isert_device *device)
223 {
224 	struct ib_device *ib_dev = device->ib_device;
225 	struct isert_cq_desc *cq_desc;
226 	struct ib_device_attr *dev_attr;
227 	int ret = 0, i, j;
228 
229 	dev_attr = &device->dev_attr;
230 	ret = isert_query_device(ib_dev, dev_attr);
231 	if (ret)
232 		return ret;
233 
234 	/* assign function handlers */
235 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
236 	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
237 		device->use_fastreg = 1;
238 		device->reg_rdma_mem = isert_reg_rdma;
239 		device->unreg_rdma_mem = isert_unreg_rdma;
240 	} else {
241 		device->use_fastreg = 0;
242 		device->reg_rdma_mem = isert_map_rdma;
243 		device->unreg_rdma_mem = isert_unmap_cmd;
244 	}
245 
246 	/* Check signature cap */
247 	device->pi_capable = dev_attr->device_cap_flags &
248 			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
249 
250 	device->cqs_used = min_t(int, num_online_cpus(),
251 				 device->ib_device->num_comp_vectors);
252 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
253 	pr_debug("Using %d CQs, device %s supports %d vectors, "
254 		 "fastreg: %d pi_capable: %d\n",
255 		 device->cqs_used, device->ib_device->name,
256 		 device->ib_device->num_comp_vectors, device->use_fastreg,
257 		 device->pi_capable);
258 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
259 				device->cqs_used, GFP_KERNEL);
260 	if (!device->cq_desc) {
261 		pr_err("Unable to allocate device->cq_desc\n");
262 		return -ENOMEM;
263 	}
264 	cq_desc = device->cq_desc;
265 
266 	for (i = 0; i < device->cqs_used; i++) {
267 		cq_desc[i].device = device;
268 		cq_desc[i].cq_index = i;
269 
270 		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
271 		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
272 						isert_cq_rx_callback,
273 						isert_cq_event_callback,
274 						(void *)&cq_desc[i],
275 						ISER_MAX_RX_CQ_LEN, i);
276 		if (IS_ERR(device->dev_rx_cq[i])) {
277 			ret = PTR_ERR(device->dev_rx_cq[i]);
278 			device->dev_rx_cq[i] = NULL;
279 			goto out_cq;
280 		}
281 
282 		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
283 		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
284 						isert_cq_tx_callback,
285 						isert_cq_event_callback,
286 						(void *)&cq_desc[i],
287 						ISER_MAX_TX_CQ_LEN, i);
288 		if (IS_ERR(device->dev_tx_cq[i])) {
289 			ret = PTR_ERR(device->dev_tx_cq[i]);
290 			device->dev_tx_cq[i] = NULL;
291 			goto out_cq;
292 		}
293 
294 		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
295 		if (ret)
296 			goto out_cq;
297 
298 		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
299 		if (ret)
300 			goto out_cq;
301 	}
302 
303 	return 0;
304 
305 out_cq:
306 	for (j = 0; j < i; j++) {
307 		cq_desc = &device->cq_desc[j];
308 
309 		if (device->dev_rx_cq[j]) {
310 			cancel_work_sync(&cq_desc->cq_rx_work);
311 			ib_destroy_cq(device->dev_rx_cq[j]);
312 		}
313 		if (device->dev_tx_cq[j]) {
314 			cancel_work_sync(&cq_desc->cq_tx_work);
315 			ib_destroy_cq(device->dev_tx_cq[j]);
316 		}
317 	}
318 	kfree(device->cq_desc);
319 
320 	return ret;
321 }
322 
323 static void
324 isert_free_device_ib_res(struct isert_device *device)
325 {
326 	struct isert_cq_desc *cq_desc;
327 	int i;
328 
329 	for (i = 0; i < device->cqs_used; i++) {
330 		cq_desc = &device->cq_desc[i];
331 
332 		cancel_work_sync(&cq_desc->cq_rx_work);
333 		cancel_work_sync(&cq_desc->cq_tx_work);
334 		ib_destroy_cq(device->dev_rx_cq[i]);
335 		ib_destroy_cq(device->dev_tx_cq[i]);
336 		device->dev_rx_cq[i] = NULL;
337 		device->dev_tx_cq[i] = NULL;
338 	}
339 
340 	kfree(device->cq_desc);
341 }
342 
343 static void
344 isert_device_try_release(struct isert_device *device)
345 {
346 	mutex_lock(&device_list_mutex);
347 	device->refcount--;
348 	if (!device->refcount) {
349 		isert_free_device_ib_res(device);
350 		list_del(&device->dev_node);
351 		kfree(device);
352 	}
353 	mutex_unlock(&device_list_mutex);
354 }
355 
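/*
 * Look up an existing isert_device for this IB device (matched by node
 * GUID) under device_list_mutex, or allocate and initialize a new one and
 * add it to the global list.  The returned device carries a reference.
 */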
356 static struct isert_device *
357 isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
358 {
359 	struct isert_device *device;
360 	int ret;
361 
362 	mutex_lock(&device_list_mutex);
363 	list_for_each_entry(device, &device_list, dev_node) {
364 		if (device->ib_device->node_guid == cma_id->device->node_guid) {
365 			device->refcount++;
366 			mutex_unlock(&device_list_mutex);
367 			return device;
368 		}
369 	}
370 
371 	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
372 	if (!device) {
373 		mutex_unlock(&device_list_mutex);
374 		return ERR_PTR(-ENOMEM);
375 	}
376 
377 	INIT_LIST_HEAD(&device->dev_node);
378 
379 	device->ib_device = cma_id->device;
380 	ret = isert_create_device_ib_res(device);
381 	if (ret) {
382 		kfree(device);
383 		mutex_unlock(&device_list_mutex);
384 		return ERR_PTR(ret);
385 	}
386 
387 	device->refcount++;
388 	list_add_tail(&device->dev_node, &device_list);
389 	mutex_unlock(&device_list_mutex);
390 
391 	return device;
392 }
393 
394 static void
395 isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
396 {
397 	struct fast_reg_descriptor *fr_desc, *tmp;
398 	int i = 0;
399 
400 	if (list_empty(&isert_conn->conn_fr_pool))
401 		return;
402 
403 	pr_debug("Freeing conn %p fastreg pool\n", isert_conn);
404 
405 	list_for_each_entry_safe(fr_desc, tmp,
406 				 &isert_conn->conn_fr_pool, list) {
407 		list_del(&fr_desc->list);
408 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
409 		ib_dereg_mr(fr_desc->data_mr);
410 		if (fr_desc->pi_ctx) {
411 			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
412 			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
413 			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
414 			kfree(fr_desc->pi_ctx);
415 		}
416 		kfree(fr_desc);
417 		++i;
418 	}
419 
420 	if (i < isert_conn->conn_fr_pool_size)
421 		pr_warn("Pool still has %d regions registered\n",
422 			isert_conn->conn_fr_pool_size - i);
423 }
424 
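/*
 * Build a single fast-registration descriptor: a page list and FRMR for
 * data, plus, when protection is requested, a PI context with its own
 * page list, protection FRMR and signature-enabled MR.
 */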
425 static int
426 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
427 		     struct fast_reg_descriptor *fr_desc, u8 protection)
428 {
429 	int ret;
430 
431 	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
432 							 ISCSI_ISER_SG_TABLESIZE);
433 	if (IS_ERR(fr_desc->data_frpl)) {
434 		pr_err("Failed to allocate data frpl err=%ld\n",
435 		       PTR_ERR(fr_desc->data_frpl));
436 		return PTR_ERR(fr_desc->data_frpl);
437 	}
438 
439 	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
440 	if (IS_ERR(fr_desc->data_mr)) {
441 		pr_err("Failed to allocate data frmr err=%ld\n",
442 		       PTR_ERR(fr_desc->data_mr));
443 		ret = PTR_ERR(fr_desc->data_mr);
444 		goto err_data_frpl;
445 	}
446 	pr_debug("Create fr_desc %p page_list %p\n",
447 		 fr_desc, fr_desc->data_frpl->page_list);
448 	fr_desc->ind |= ISERT_DATA_KEY_VALID;
449 
450 	if (protection) {
451 		struct ib_mr_init_attr mr_init_attr = {0};
452 		struct pi_context *pi_ctx;
453 
454 		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
455 		if (!fr_desc->pi_ctx) {
456 			pr_err("Failed to allocate pi context\n");
457 			ret = -ENOMEM;
458 			goto err_data_mr;
459 		}
460 		pi_ctx = fr_desc->pi_ctx;
461 
462 		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
463 						    ISCSI_ISER_SG_TABLESIZE);
464 		if (IS_ERR(pi_ctx->prot_frpl)) {
465 			pr_err("Failed to allocate prot frpl err=%ld\n",
466 			       PTR_ERR(pi_ctx->prot_frpl));
467 			ret = PTR_ERR(pi_ctx->prot_frpl);
468 			goto err_pi_ctx;
469 		}
470 
471 		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
472 		if (IS_ERR(pi_ctx->prot_mr)) {
473 			pr_err("Failed to allocate prot frmr err=%ld\n",
474 			       PTR_ERR(pi_ctx->prot_mr));
475 			ret = PTR_ERR(pi_ctx->prot_mr);
476 			goto err_prot_frpl;
477 		}
478 		fr_desc->ind |= ISERT_PROT_KEY_VALID;
479 
480 		mr_init_attr.max_reg_descriptors = 2;
481 		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
482 		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
483 		if (IS_ERR(pi_ctx->sig_mr)) {
484 			pr_err("Failed to allocate signature enabled mr err=%ld\n",
485 			       PTR_ERR(pi_ctx->sig_mr));
486 			ret = PTR_ERR(pi_ctx->sig_mr);
487 			goto err_prot_mr;
488 		}
489 		fr_desc->ind |= ISERT_SIG_KEY_VALID;
490 	}
491 	fr_desc->ind &= ~ISERT_PROTECTED;
492 
493 	return 0;
494 err_prot_mr:
495 	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
496 err_prot_frpl:
497 	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
498 err_pi_ctx:
499 	kfree(fr_desc->pi_ctx);
500 err_data_mr:
501 	ib_dereg_mr(fr_desc->data_mr);
502 err_data_frpl:
503 	ib_free_fast_reg_page_list(fr_desc->data_frpl);
504 
505 	return ret;
506 }
507 
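/*
 * Populate the per-connection pool of fast-registration descriptors used
 * by the fastreg RDMA path; the pool is torn down again by
 * isert_conn_free_fastreg_pool() on error or connection release.
 */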
508 static int
509 isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
510 {
511 	struct fast_reg_descriptor *fr_desc;
512 	struct isert_device *device = isert_conn->conn_device;
513 	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
514 	struct se_node_acl *se_nacl = se_sess->se_node_acl;
515 	int i, ret, tag_num;
516 	/*
517 	 * Setup the number of FRMRs based upon the number of tags
518 	 * available to session in iscsi_target_locate_portal().
519 	 */
520 	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
521 	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
522 
523 	isert_conn->conn_fr_pool_size = 0;
524 	for (i = 0; i < tag_num; i++) {
525 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
526 		if (!fr_desc) {
527 			pr_err("Failed to allocate fast_reg descriptor\n");
528 			ret = -ENOMEM;
529 			goto err;
530 		}
531 
532 		ret = isert_create_fr_desc(device->ib_device,
533 					   isert_conn->conn_pd, fr_desc,
534 					   pi_support);
535 		if (ret) {
536 			pr_err("Failed to create fastreg descriptor err=%d\n",
537 			       ret);
538 			kfree(fr_desc);
539 			goto err;
540 		}
541 
542 		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
543 		isert_conn->conn_fr_pool_size++;
544 	}
545 
546 	pr_debug("Created conn %p fastreg pool size=%d\n",
547 		 isert_conn, isert_conn->conn_fr_pool_size);
548 
549 	return 0;
550 
551 err:
552 	isert_conn_free_fastreg_pool(isert_conn);
553 	return ret;
554 }
555 
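/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate the isert_conn, set up
 * and DMA-map the login request/response buffers, take a reference on the
 * per-device resources, allocate PD and DMA MR, create the QP and queue
 * the new connection for the login thread.  The request is rejected if
 * the iscsi_np is not yet enabled or if T10-PI is requested on a device
 * without signature support.
 */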
556 static int
557 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
558 {
559 	struct iscsi_np *np = cma_id->context;
560 	struct isert_np *isert_np = np->np_context;
561 	struct isert_conn *isert_conn;
562 	struct isert_device *device;
563 	struct ib_device *ib_dev = cma_id->device;
564 	int ret = 0;
565 	u8 pi_support;
566 
567 	spin_lock_bh(&np->np_thread_lock);
568 	if (!np->enabled) {
569 		spin_unlock_bh(&np->np_thread_lock);
570 		pr_debug("iscsi_np is not enabled, reject connect request\n");
571 		return rdma_reject(cma_id, NULL, 0);
572 	}
573 	spin_unlock_bh(&np->np_thread_lock);
574 
575 	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
576 		 cma_id, cma_id->context);
577 
578 	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
579 	if (!isert_conn) {
580 		pr_err("Unable to allocate isert_conn\n");
581 		return -ENOMEM;
582 	}
583 	isert_conn->state = ISER_CONN_INIT;
584 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
585 	init_completion(&isert_conn->conn_login_comp);
586 	init_completion(&isert_conn->conn_wait);
587 	init_completion(&isert_conn->conn_wait_comp_err);
588 	kref_init(&isert_conn->conn_kref);
589 	mutex_init(&isert_conn->conn_mutex);
590 	spin_lock_init(&isert_conn->conn_lock);
591 	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
592 
593 	cma_id->context = isert_conn;
594 	isert_conn->conn_cm_id = cma_id;
595 
596 	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
597 					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
598 	if (!isert_conn->login_buf) {
599 		pr_err("Unable to allocate isert_conn->login_buf\n");
600 		ret = -ENOMEM;
601 		goto out;
602 	}
603 
604 	isert_conn->login_req_buf = isert_conn->login_buf;
605 	isert_conn->login_rsp_buf = isert_conn->login_buf +
606 				    ISCSI_DEF_MAX_RECV_SEG_LEN;
607 	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
608 		 isert_conn->login_buf, isert_conn->login_req_buf,
609 		 isert_conn->login_rsp_buf);
610 
611 	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
612 				(void *)isert_conn->login_req_buf,
613 				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
614 
615 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
616 	if (ret) {
617 		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
618 		       ret);
619 		isert_conn->login_req_dma = 0;
620 		goto out_login_buf;
621 	}
622 
623 	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
624 					(void *)isert_conn->login_rsp_buf,
625 					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
626 
627 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
628 	if (ret) {
629 		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
630 		       ret);
631 		isert_conn->login_rsp_dma = 0;
632 		goto out_req_dma_map;
633 	}
634 
635 	device = isert_device_find_by_ib_dev(cma_id);
636 	if (IS_ERR(device)) {
637 		ret = PTR_ERR(device);
638 		goto out_rsp_dma_map;
639 	}
640 
641 	/* Set max inflight RDMA READ requests */
642 	isert_conn->initiator_depth = min_t(u8,
643 				event->param.conn.initiator_depth,
644 				device->dev_attr.max_qp_init_rd_atom);
645 	pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
646 
647 	isert_conn->conn_device = device;
648 	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
649 	if (IS_ERR(isert_conn->conn_pd)) {
650 		ret = PTR_ERR(isert_conn->conn_pd);
651 		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
652 		       isert_conn, ret);
653 		goto out_pd;
654 	}
655 
656 	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
657 					   IB_ACCESS_LOCAL_WRITE);
658 	if (IS_ERR(isert_conn->conn_mr)) {
659 		ret = PTR_ERR(isert_conn->conn_mr);
660 		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
661 		       isert_conn, ret);
662 		goto out_mr;
663 	}
664 
665 	pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
666 	if (pi_support && !device->pi_capable) {
667 		pr_err("Protection information requested but not supported, "
668 		       "rejecting connect request\n");
669 		ret = rdma_reject(cma_id, NULL, 0);
670 		goto out_conn_dev;
671 	}
672 
673 	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
674 	if (ret)
675 		goto out_conn_dev;
676 
677 	mutex_lock(&isert_np->np_accept_mutex);
678 	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
679 	mutex_unlock(&isert_np->np_accept_mutex);
680 
681 	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
682 	up(&isert_np->np_sem);
683 	return 0;
684 
685 out_conn_dev:
686 	ib_dereg_mr(isert_conn->conn_mr);
687 out_mr:
688 	ib_dealloc_pd(isert_conn->conn_pd);
689 out_pd:
690 	isert_device_try_release(device);
691 out_rsp_dma_map:
692 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
693 			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
694 out_req_dma_map:
695 	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
696 			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
697 out_login_buf:
698 	kfree(isert_conn->login_buf);
699 out:
700 	kfree(isert_conn);
701 	return ret;
702 }
703 
704 static void
705 isert_connect_release(struct isert_conn *isert_conn)
706 {
707 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
708 	struct isert_device *device = isert_conn->conn_device;
709 	int cq_index;
710 
711 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
712 
713 	if (device && device->use_fastreg)
714 		isert_conn_free_fastreg_pool(isert_conn);
715 
716 	if (isert_conn->conn_qp) {
717 		cq_index = ((struct isert_cq_desc *)
718 			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
719 		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
720 		isert_conn->conn_device->cq_active_qps[cq_index]--;
721 
722 		rdma_destroy_qp(isert_conn->conn_cm_id);
723 	}
724 
725 	isert_free_rx_descriptors(isert_conn);
726 	rdma_destroy_id(isert_conn->conn_cm_id);
727 
728 	ib_dereg_mr(isert_conn->conn_mr);
729 	ib_dealloc_pd(isert_conn->conn_pd);
730 
731 	if (isert_conn->login_buf) {
732 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
733 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
734 		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
735 				    ISCSI_DEF_MAX_RECV_SEG_LEN,
736 				    DMA_FROM_DEVICE);
737 		kfree(isert_conn->login_buf);
738 	}
739 	kfree(isert_conn);
740 
741 	if (device)
742 		isert_device_try_release(device);
743 
744 	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
745 }
746 
747 static void
748 isert_connected_handler(struct rdma_cm_id *cma_id)
749 {
750 	struct isert_conn *isert_conn = cma_id->context;
751 
752 	kref_get(&isert_conn->conn_kref);
753 }
754 
755 static void
756 isert_release_conn_kref(struct kref *kref)
757 {
758 	struct isert_conn *isert_conn = container_of(kref,
759 				struct isert_conn, conn_kref);
760 
761 	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
762 		 current->comm, current->pid);
763 
764 	isert_connect_release(isert_conn);
765 }
766 
767 static void
768 isert_put_conn(struct isert_conn *isert_conn)
769 {
770 	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
771 }
772 
773 static void
774 isert_disconnect_work(struct work_struct *work)
775 {
776 	struct isert_conn *isert_conn = container_of(work,
777 				struct isert_conn, conn_logout_work);
778 
779 	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
780 	mutex_lock(&isert_conn->conn_mutex);
781 	if (isert_conn->state == ISER_CONN_UP)
782 		isert_conn->state = ISER_CONN_TERMINATING;
783 
784 	if (isert_conn->post_recv_buf_count == 0 &&
785 	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
786 		mutex_unlock(&isert_conn->conn_mutex);
787 		goto wake_up;
788 	}
789 	if (!isert_conn->conn_cm_id) {
790 		mutex_unlock(&isert_conn->conn_mutex);
791 		isert_put_conn(isert_conn);
792 		return;
793 	}
794 
795 	if (isert_conn->disconnect) {
796 		/* Send DREQ/DREP towards our initiator */
797 		rdma_disconnect(isert_conn->conn_cm_id);
798 	}
799 
800 	mutex_unlock(&isert_conn->conn_mutex);
801 
802 wake_up:
803 	complete(&isert_conn->conn_wait);
804 }
805 
806 static void
807 isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
808 {
809 	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
810 
811 	isert_conn->disconnect = disconnect;
812 	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
813 	schedule_work(&isert_conn->conn_logout_work);
814 }
815 
816 static int
817 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
818 {
819 	int ret = 0;
820 	bool disconnect = false;
821 
822 	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
823 		 event->event, event->status, cma_id->context, cma_id);
824 
825 	switch (event->event) {
826 	case RDMA_CM_EVENT_CONNECT_REQUEST:
827 		ret = isert_connect_request(cma_id, event);
828 		break;
829 	case RDMA_CM_EVENT_ESTABLISHED:
830 		isert_connected_handler(cma_id);
831 		break;
832 	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
833 	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
834 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
835 		disconnect = true;
836 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
837 		isert_disconnected_handler(cma_id, disconnect);
838 		break;
839 	case RDMA_CM_EVENT_CONNECT_ERROR:
840 	default:
841 		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
842 		break;
843 	}
844 
845 	if (ret != 0) {
846 		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
847 		       event->event, ret);
848 		dump_stack();
849 	}
850 
851 	return ret;
852 }
853 
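/*
 * Post 'count' receive work requests as a single chained list starting at
 * conn_rx_desc_head; the head index wraps with the power-of-two ring mask
 * and is only advanced once ib_post_recv() succeeds.
 */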
854 static int
855 isert_post_recv(struct isert_conn *isert_conn, u32 count)
856 {
857 	struct ib_recv_wr *rx_wr, *rx_wr_failed;
858 	int i, ret;
859 	unsigned int rx_head = isert_conn->conn_rx_desc_head;
860 	struct iser_rx_desc *rx_desc;
861 
862 	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
863 		rx_desc		= &isert_conn->conn_rx_descs[rx_head];
864 		rx_wr->wr_id	= (unsigned long)rx_desc;
865 		rx_wr->sg_list	= &rx_desc->rx_sg;
866 		rx_wr->num_sge	= 1;
867 		rx_wr->next	= rx_wr + 1;
868 		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
869 	}
870 
871 	rx_wr--;
872 	rx_wr->next = NULL; /* mark end of work requests list */
873 
874 	isert_conn->post_recv_buf_count += count;
875 	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
876 				&rx_wr_failed);
877 	if (ret) {
878 		pr_err("ib_post_recv() failed with ret: %d\n", ret);
879 		isert_conn->post_recv_buf_count -= count;
880 	} else {
881 		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
882 		isert_conn->conn_rx_desc_head = rx_head;
883 	}
884 	return ret;
885 }
886 
887 static int
888 isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
889 {
890 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
891 	struct ib_send_wr send_wr, *send_wr_failed;
892 	int ret;
893 
894 	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
895 				      ISER_HEADERS_LEN, DMA_TO_DEVICE);
896 
897 	send_wr.next	= NULL;
898 	send_wr.wr_id	= (unsigned long)tx_desc;
899 	send_wr.sg_list	= tx_desc->tx_sg;
900 	send_wr.num_sge	= tx_desc->num_sge;
901 	send_wr.opcode	= IB_WR_SEND;
902 	send_wr.send_flags = IB_SEND_SIGNALED;
903 
904 	atomic_inc(&isert_conn->post_send_buf_count);
905 
906 	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
907 	if (ret) {
908 		pr_err("ib_post_send() failed, ret: %d\n", ret);
909 		atomic_dec(&isert_conn->post_send_buf_count);
910 	}
911 
912 	return ret;
913 }
914 
915 static void
916 isert_create_send_desc(struct isert_conn *isert_conn,
917 		       struct isert_cmd *isert_cmd,
918 		       struct iser_tx_desc *tx_desc)
919 {
920 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
921 
922 	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
923 				   ISER_HEADERS_LEN, DMA_TO_DEVICE);
924 
925 	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
926 	tx_desc->iser_header.flags = ISER_VER;
927 
928 	tx_desc->num_sge = 1;
929 	tx_desc->isert_cmd = isert_cmd;
930 
931 	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
932 		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
933 		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
934 	}
935 }
936 
937 static int
938 isert_init_tx_hdrs(struct isert_conn *isert_conn,
939 		   struct iser_tx_desc *tx_desc)
940 {
941 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
942 	u64 dma_addr;
943 
944 	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
945 			ISER_HEADERS_LEN, DMA_TO_DEVICE);
946 	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
947 		pr_err("ib_dma_mapping_error() failed\n");
948 		return -ENOMEM;
949 	}
950 
951 	tx_desc->dma_addr = dma_addr;
952 	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
953 	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
954 	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
955 
956 	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
957 		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
958 		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
959 
960 	return 0;
961 }
962 
963 static void
964 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
965 		   struct ib_send_wr *send_wr, bool coalesce)
966 {
967 	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
968 
969 	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
970 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
971 	send_wr->opcode = IB_WR_SEND;
972 	send_wr->sg_list = &tx_desc->tx_sg[0];
973 	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
974 	/*
975 	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
976 	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
977 	 */
978 	mutex_lock(&isert_conn->conn_mutex);
979 	if (coalesce && isert_conn->state == ISER_CONN_UP &&
980 	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
981 		tx_desc->llnode_active = true;
982 		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
983 		mutex_unlock(&isert_conn->conn_mutex);
984 		return;
985 	}
986 	isert_conn->conn_comp_batch = 0;
987 	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
988 	mutex_unlock(&isert_conn->conn_mutex);
989 
990 	send_wr->send_flags = IB_SEND_SIGNALED;
991 }
992 
993 static int
994 isert_rdma_post_recvl(struct isert_conn *isert_conn)
995 {
996 	struct ib_recv_wr rx_wr, *rx_wr_fail;
997 	struct ib_sge sge;
998 	int ret;
999 
1000 	memset(&sge, 0, sizeof(struct ib_sge));
1001 	sge.addr = isert_conn->login_req_dma;
1002 	sge.length = ISER_RX_LOGIN_SIZE;
1003 	sge.lkey = isert_conn->conn_mr->lkey;
1004 
1005 	pr_debug("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
1006 		sge.addr, sge.length, sge.lkey);
1007 
1008 	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
1009 	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
1010 	rx_wr.sg_list = &sge;
1011 	rx_wr.num_sge = 1;
1012 
1013 	isert_conn->post_recv_buf_count++;
1014 	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
1015 	if (ret) {
1016 		pr_err("ib_post_recv() failed: %d\n", ret);
1017 		isert_conn->post_recv_buf_count--;
1018 		return ret;
1019 	}
1020 	pr_debug("ib_post_recv() posted login receive buffer\n");
1021 	return ret;
1022 }
1023 
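/*
 * Transmit a login response PDU.  On the final successful login this also
 * creates the fastreg pool for normal sessions (when fastreg is in use),
 * allocates the RX descriptor ring, posts the initial batch of receives
 * and moves the connection to ISER_CONN_UP; for intermediate login
 * exchanges another login receive is posted before the response goes out.
 */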
1024 static int
1025 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1026 		   u32 length)
1027 {
1028 	struct isert_conn *isert_conn = conn->context;
1029 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1030 	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
1031 	int ret;
1032 
1033 	isert_create_send_desc(isert_conn, NULL, tx_desc);
1034 
1035 	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
1036 	       sizeof(struct iscsi_hdr));
1037 
1038 	isert_init_tx_hdrs(isert_conn, tx_desc);
1039 
1040 	if (length > 0) {
1041 		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
1042 
1043 		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
1044 					   length, DMA_TO_DEVICE);
1045 
1046 		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
1047 
1048 		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
1049 					      length, DMA_TO_DEVICE);
1050 
1051 		tx_dsg->addr	= isert_conn->login_rsp_dma;
1052 		tx_dsg->length	= length;
1053 		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
1054 		tx_desc->num_sge = 2;
1055 	}
1056 	if (!login->login_failed) {
1057 		if (login->login_complete) {
1058 			if (!conn->sess->sess_ops->SessionType &&
1059 			    isert_conn->conn_device->use_fastreg) {
1060 				/* Normal Session and fastreg is used */
1061 				u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
1062 
1063 				ret = isert_conn_create_fastreg_pool(isert_conn,
1064 								     pi_support);
1065 				if (ret) {
1066 					pr_err("Conn: %p failed to create"
1067 					       " fastreg pool\n", isert_conn);
1068 					return ret;
1069 				}
1070 			}
1071 
1072 			ret = isert_alloc_rx_descriptors(isert_conn);
1073 			if (ret)
1074 				return ret;
1075 
1076 			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
1077 			if (ret)
1078 				return ret;
1079 
1080 			isert_conn->state = ISER_CONN_UP;
1081 			goto post_send;
1082 		}
1083 
1084 		ret = isert_rdma_post_recvl(isert_conn);
1085 		if (ret)
1086 			return ret;
1087 	}
1088 post_send:
1089 	ret = isert_post_send(isert_conn, tx_desc);
1090 	if (ret)
1091 		return ret;
1092 
1093 	return 0;
1094 }
1095 
1096 static void
1097 isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
1098 		   struct isert_conn *isert_conn)
1099 {
1100 	struct iscsi_conn *conn = isert_conn->conn;
1101 	struct iscsi_login *login = conn->conn_login;
1102 	int size;
1103 
1104 	if (!login) {
1105 		pr_err("conn->conn_login is NULL\n");
1106 		dump_stack();
1107 		return;
1108 	}
1109 
1110 	if (login->first_request) {
1111 		struct iscsi_login_req *login_req =
1112 			(struct iscsi_login_req *)&rx_desc->iscsi_header;
1113 		/*
1114 		 * Setup the initial iscsi_login values from the leading
1115 		 * login request PDU.
1116 		 */
1117 		login->leading_connection = (!login_req->tsih) ? 1 : 0;
1118 		login->current_stage =
1119 			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
1120 			 >> 2;
1121 		login->version_min	= login_req->min_version;
1122 		login->version_max	= login_req->max_version;
1123 		memcpy(login->isid, login_req->isid, 6);
1124 		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
1125 		login->init_task_tag	= login_req->itt;
1126 		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1127 		login->cid		= be16_to_cpu(login_req->cid);
1128 		login->tsih		= be16_to_cpu(login_req->tsih);
1129 	}
1130 
1131 	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1132 
1133 	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1134 	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
1135 		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
1136 	memcpy(login->req_buf, &rx_desc->data[0], size);
1137 
1138 	if (login->first_request) {
1139 		complete(&isert_conn->conn_login_comp);
1140 		return;
1141 	}
1142 	schedule_delayed_work(&conn->login_work, 0);
1143 }
1144 
1145 static struct iscsi_cmd
1146 *isert_allocate_cmd(struct iscsi_conn *conn)
1147 {
1148 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1149 	struct isert_cmd *isert_cmd;
1150 	struct iscsi_cmd *cmd;
1151 
1152 	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1153 	if (!cmd) {
1154 		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1155 		return NULL;
1156 	}
1157 	isert_cmd = iscsit_priv_cmd(cmd);
1158 	isert_cmd->conn = isert_conn;
1159 	isert_cmd->iscsi_cmd = cmd;
1160 
1161 	return cmd;
1162 }
1163 
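/*
 * Handle a received SCSI command PDU: set the command up in the iSCSI
 * core, copy any immediate data from the receive descriptor into the
 * command's scatterlist, then sequence the command.
 */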
1164 static int
1165 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1166 		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1167 		      struct iser_rx_desc *rx_desc, unsigned char *buf)
1168 {
1169 	struct iscsi_conn *conn = isert_conn->conn;
1170 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1171 	struct scatterlist *sg;
1172 	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1173 	bool dump_payload = false;
1174 
1175 	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1176 	if (rc < 0)
1177 		return rc;
1178 
1179 	imm_data = cmd->immediate_data;
1180 	imm_data_len = cmd->first_burst_len;
1181 	unsol_data = cmd->unsolicited_data;
1182 
1183 	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1184 	if (rc < 0) {
1185 		return 0;
1186 	} else if (rc > 0) {
1187 		dump_payload = true;
1188 		goto sequence_cmd;
1189 	}
1190 
1191 	if (!imm_data)
1192 		return 0;
1193 
1194 	sg = &cmd->se_cmd.t_data_sg[0];
1195 	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1196 
1197 	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
1198 		 sg, sg_nents, &rx_desc->data[0], imm_data_len);
1199 
1200 	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
1201 
1202 	cmd->write_data_done += imm_data_len;
1203 
1204 	if (cmd->write_data_done == cmd->se_cmd.data_length) {
1205 		spin_lock_bh(&cmd->istate_lock);
1206 		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1207 		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1208 		spin_unlock_bh(&cmd->istate_lock);
1209 	}
1210 
1211 sequence_cmd:
1212 	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1213 
1214 	if (!rc && !dump_payload && unsol_data)
1215 		iscsit_set_unsoliticed_dataout(cmd);
1216 	else if (dump_payload && imm_data)
1217 		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1218 
1219 	return 0;
1220 }
1221 
1222 static int
1223 isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1224 			   struct iser_rx_desc *rx_desc, unsigned char *buf)
1225 {
1226 	struct scatterlist *sg_start;
1227 	struct iscsi_conn *conn = isert_conn->conn;
1228 	struct iscsi_cmd *cmd = NULL;
1229 	struct iscsi_data *hdr = (struct iscsi_data *)buf;
1230 	u32 unsol_data_len = ntoh24(hdr->dlength);
1231 	int rc, sg_nents, sg_off, page_off;
1232 
1233 	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1234 	if (rc < 0)
1235 		return rc;
1236 	else if (!cmd)
1237 		return 0;
1238 	/*
1239 	 * FIXME: Unexpected unsolicited_data out
1240 	 */
1241 	if (!cmd->unsolicited_data) {
1242 		pr_err("Received unexpected solicited data payload\n");
1243 		dump_stack();
1244 		return -1;
1245 	}
1246 
1247 	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
1248 		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);
1249 
1250 	sg_off = cmd->write_data_done / PAGE_SIZE;
1251 	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1252 	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1253 	page_off = cmd->write_data_done % PAGE_SIZE;
1254 	/*
1255 	 * FIXME: Non page-aligned unsolicited_data out
1256 	 */
1257 	if (page_off) {
1258 		pr_err("Received unexpected non-page aligned data payload\n");
1259 		dump_stack();
1260 		return -1;
1261 	}
1262 	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
1263 		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);
1264 
1265 	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1266 			    unsol_data_len);
1267 
1268 	rc = iscsit_check_dataout_payload(cmd, hdr, false);
1269 	if (rc < 0)
1270 		return rc;
1271 
1272 	return 0;
1273 }
1274 
1275 static int
1276 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1277 		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1278 		     unsigned char *buf)
1279 {
1280 	struct iscsi_conn *conn = isert_conn->conn;
1281 	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1282 	int rc;
1283 
1284 	rc = iscsit_setup_nop_out(conn, cmd, hdr);
1285 	if (rc < 0)
1286 		return rc;
1287 	/*
1288 	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1289 	 */
1290 
1291 	return iscsit_process_nop_out(conn, cmd, hdr);
1292 }
1293 
1294 static int
1295 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1296 		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1297 		      struct iscsi_text *hdr)
1298 {
1299 	struct iscsi_conn *conn = isert_conn->conn;
1300 	u32 payload_length = ntoh24(hdr->dlength);
1301 	int rc;
1302 	unsigned char *text_in;
1303 
1304 	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1305 	if (rc < 0)
1306 		return rc;
1307 
1308 	text_in = kzalloc(payload_length, GFP_KERNEL);
1309 	if (!text_in) {
1310 		pr_err("Unable to allocate text_in of payload_length: %u\n",
1311 		       payload_length);
1312 		return -ENOMEM;
1313 	}
1314 	cmd->text_in_ptr = text_in;
1315 
1316 	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1317 
1318 	return iscsit_process_text_cmd(conn, cmd, hdr);
1319 }
1320 
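/*
 * Dispatch a received iSCSI PDU by opcode, allocating an iscsi_cmd where
 * needed; for SCSI commands the remote stag/VA pairs from the iSER header
 * are stashed in the isert_cmd for the later RDMA transfer.
 */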
1321 static int
1322 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1323 		uint32_t read_stag, uint64_t read_va,
1324 		uint32_t write_stag, uint64_t write_va)
1325 {
1326 	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1327 	struct iscsi_conn *conn = isert_conn->conn;
1328 	struct iscsi_session *sess = conn->sess;
1329 	struct iscsi_cmd *cmd;
1330 	struct isert_cmd *isert_cmd;
1331 	int ret = -EINVAL;
1332 	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1333 
1334 	if (sess->sess_ops->SessionType &&
1335 	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1336 		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1337 		       " ignoring\n", opcode);
1338 		return 0;
1339 	}
1340 
1341 	switch (opcode) {
1342 	case ISCSI_OP_SCSI_CMD:
1343 		cmd = isert_allocate_cmd(conn);
1344 		if (!cmd)
1345 			break;
1346 
1347 		isert_cmd = iscsit_priv_cmd(cmd);
1348 		isert_cmd->read_stag = read_stag;
1349 		isert_cmd->read_va = read_va;
1350 		isert_cmd->write_stag = write_stag;
1351 		isert_cmd->write_va = write_va;
1352 
1353 		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1354 					rx_desc, (unsigned char *)hdr);
1355 		break;
1356 	case ISCSI_OP_NOOP_OUT:
1357 		cmd = isert_allocate_cmd(conn);
1358 		if (!cmd)
1359 			break;
1360 
1361 		isert_cmd = iscsit_priv_cmd(cmd);
1362 		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1363 					   rx_desc, (unsigned char *)hdr);
1364 		break;
1365 	case ISCSI_OP_SCSI_DATA_OUT:
1366 		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1367 						(unsigned char *)hdr);
1368 		break;
1369 	case ISCSI_OP_SCSI_TMFUNC:
1370 		cmd = isert_allocate_cmd(conn);
1371 		if (!cmd)
1372 			break;
1373 
1374 		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1375 						(unsigned char *)hdr);
1376 		break;
1377 	case ISCSI_OP_LOGOUT:
1378 		cmd = isert_allocate_cmd(conn);
1379 		if (!cmd)
1380 			break;
1381 
1382 		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1383 		if (ret > 0)
1384 			wait_for_completion_timeout(&conn->conn_logout_comp,
1385 						    SECONDS_FOR_LOGOUT_COMP *
1386 						    HZ);
1387 		break;
1388 	case ISCSI_OP_TEXT:
1389 		cmd = isert_allocate_cmd(conn);
1390 		if (!cmd)
1391 			break;
1392 
1393 		isert_cmd = iscsit_priv_cmd(cmd);
1394 		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1395 					    rx_desc, (struct iscsi_text *)hdr);
1396 		break;
1397 	default:
1398 		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1399 		dump_stack();
1400 		break;
1401 	}
1402 
1403 	return ret;
1404 }
1405 
1406 static void
1407 isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1408 {
1409 	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1410 	uint64_t read_va = 0, write_va = 0;
1411 	uint32_t read_stag = 0, write_stag = 0;
1412 	int rc;
1413 
1414 	switch (iser_hdr->flags & 0xF0) {
1415 	case ISCSI_CTRL:
1416 		if (iser_hdr->flags & ISER_RSV) {
1417 			read_stag = be32_to_cpu(iser_hdr->read_stag);
1418 			read_va = be64_to_cpu(iser_hdr->read_va);
1419 			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1420 				 read_stag, (unsigned long long)read_va);
1421 		}
1422 		if (iser_hdr->flags & ISER_WSV) {
1423 			write_stag = be32_to_cpu(iser_hdr->write_stag);
1424 			write_va = be64_to_cpu(iser_hdr->write_va);
1425 			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
1426 				 write_stag, (unsigned long long)write_va);
1427 		}
1428 
1429 		pr_debug("ISER ISCSI_CTRL PDU\n");
1430 		break;
1431 	case ISER_HELLO:
1432 		pr_err("iSER Hello message\n");
1433 		break;
1434 	default:
1435 		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1436 		break;
1437 	}
1438 
1439 	rc = isert_rx_opcode(isert_conn, rx_desc,
1440 			     read_stag, read_va, write_stag, write_va);
1441 }
1442 
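/*
 * Receive completion: decide whether this is the login buffer or a ring
 * descriptor, sync it for the CPU, hand it to login or PDU processing,
 * and replenish the receive queue once enough buffers have been consumed.
 */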
1443 static void
1444 isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1445 		    unsigned long xfer_len)
1446 {
1447 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1448 	struct iscsi_hdr *hdr;
1449 	u64 rx_dma;
1450 	int rx_buflen, outstanding;
1451 
1452 	if ((char *)desc == isert_conn->login_req_buf) {
1453 		rx_dma = isert_conn->login_req_dma;
1454 		rx_buflen = ISER_RX_LOGIN_SIZE;
1455 		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1456 			 rx_dma, rx_buflen);
1457 	} else {
1458 		rx_dma = desc->dma_addr;
1459 		rx_buflen = ISER_RX_PAYLOAD_SIZE;
1460 		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1461 			 rx_dma, rx_buflen);
1462 	}
1463 
1464 	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1465 
1466 	hdr = &desc->iscsi_header;
1467 	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1468 		 hdr->opcode, hdr->itt, hdr->flags,
1469 		 (int)(xfer_len - ISER_HEADERS_LEN));
1470 
1471 	if ((char *)desc == isert_conn->login_req_buf)
1472 		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
1473 				   isert_conn);
1474 	else
1475 		isert_rx_do_work(desc, isert_conn);
1476 
1477 	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1478 				      DMA_FROM_DEVICE);
1479 
1480 	isert_conn->post_recv_buf_count--;
1481 	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
1482 		 isert_conn->post_recv_buf_count);
1483 
1484 	if ((char *)desc == isert_conn->login_req_buf)
1485 		return;
1486 
1487 	outstanding = isert_conn->post_recv_buf_count;
1488 	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1489 		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1490 				ISERT_MIN_POSTED_RX);
1491 		err = isert_post_recv(isert_conn, count);
1492 		if (err) {
1493 			pr_err("isert_post_recv() count: %d failed, %d\n",
1494 			       count, err);
1495 		}
1496 	}
1497 }
1498 
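/*
 * DMA-map a window of the se_cmd scatterlist for an RDMA operation; the
 * direction follows the iSER opcode and the mapped length is clamped to
 * ISCSI_ISER_SG_TABLESIZE pages.
 */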
1499 static int
1500 isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1501 		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1502 		   enum iser_ib_op_code op, struct isert_data_buf *data)
1503 {
1504 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1505 
1506 	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1507 			      DMA_TO_DEVICE : DMA_FROM_DEVICE;
1508 
1509 	data->len = length - offset;
1510 	data->offset = offset;
1511 	data->sg_off = data->offset / PAGE_SIZE;
1512 
1513 	data->sg = &sg[data->sg_off];
1514 	data->nents = min_t(unsigned int, nents - data->sg_off,
1515 					  ISCSI_ISER_SG_TABLESIZE);
1516 	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1517 					PAGE_SIZE);
1518 
1519 	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1520 					data->dma_dir);
1521 	if (unlikely(!data->dma_nents)) {
1522 		pr_err("Cmd: unable to dma map SGs %p\n", sg);
1523 		return -EINVAL;
1524 	}
1525 
1526 	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1527 		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1528 
1529 	return 0;
1530 }
1531 
1532 static void
1533 isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1534 {
1535 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1536 
1537 	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1538 	memset(data, 0, sizeof(*data));
1539 }
1540 
1541 
1542 
1543 static void
1544 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1545 {
1546 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1547 
1548 	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1549 
1550 	if (wr->data.sg) {
1551 		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1552 		isert_unmap_data_buf(isert_conn, &wr->data);
1553 	}
1554 
1555 	if (wr->send_wr) {
1556 		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1557 		kfree(wr->send_wr);
1558 		wr->send_wr = NULL;
1559 	}
1560 
1561 	if (wr->ib_sge) {
1562 		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1563 		kfree(wr->ib_sge);
1564 		wr->ib_sge = NULL;
1565 	}
1566 }
1567 
1568 static void
1569 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1570 {
1571 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1572 	LIST_HEAD(unmap_list);
1573 
1574 	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
1575 
1576 	if (wr->fr_desc) {
1577 		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
1578 			 isert_cmd, wr->fr_desc);
1579 		if (wr->fr_desc->ind & ISERT_PROTECTED) {
1580 			isert_unmap_data_buf(isert_conn, &wr->prot);
1581 			wr->fr_desc->ind &= ~ISERT_PROTECTED;
1582 		}
1583 		spin_lock_bh(&isert_conn->conn_lock);
1584 		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
1585 		spin_unlock_bh(&isert_conn->conn_lock);
1586 		wr->fr_desc = NULL;
1587 	}
1588 
1589 	if (wr->data.sg) {
1590 		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
1591 		isert_unmap_data_buf(isert_conn, &wr->data);
1592 	}
1593 
1594 	wr->ib_sge = NULL;
1595 	wr->send_wr = NULL;
1596 }
1597 
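/*
 * Final put of a command: per-opcode teardown of RDMA registrations and
 * release back to the iSCSI/target core, including the comp_err special
 * case for WRITE_PENDING commands and REJECT responses that overwrote the
 * original opcode.
 */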
1598 static void
1599 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1600 {
1601 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1602 	struct isert_conn *isert_conn = isert_cmd->conn;
1603 	struct iscsi_conn *conn = isert_conn->conn;
1604 	struct isert_device *device = isert_conn->conn_device;
1605 
1606 	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1607 
1608 	switch (cmd->iscsi_opcode) {
1609 	case ISCSI_OP_SCSI_CMD:
1610 		spin_lock_bh(&conn->cmd_lock);
1611 		if (!list_empty(&cmd->i_conn_node))
1612 			list_del_init(&cmd->i_conn_node);
1613 		spin_unlock_bh(&conn->cmd_lock);
1614 
1615 		if (cmd->data_direction == DMA_TO_DEVICE) {
1616 			iscsit_stop_dataout_timer(cmd);
1617 			/*
1618 			 * Check for special case during comp_err where
1619 			 * WRITE_PENDING has been handed off from core,
1620 			 * but requires an extra target_put_sess_cmd()
1621 			 * before transport_generic_free_cmd() below.
1622 			 */
1623 			if (comp_err &&
1624 			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1625 				struct se_cmd *se_cmd = &cmd->se_cmd;
1626 
1627 				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1628 			}
1629 		}
1630 
1631 		device->unreg_rdma_mem(isert_cmd, isert_conn);
1632 		transport_generic_free_cmd(&cmd->se_cmd, 0);
1633 		break;
1634 	case ISCSI_OP_SCSI_TMFUNC:
1635 		spin_lock_bh(&conn->cmd_lock);
1636 		if (!list_empty(&cmd->i_conn_node))
1637 			list_del_init(&cmd->i_conn_node);
1638 		spin_unlock_bh(&conn->cmd_lock);
1639 
1640 		transport_generic_free_cmd(&cmd->se_cmd, 0);
1641 		break;
1642 	case ISCSI_OP_REJECT:
1643 	case ISCSI_OP_NOOP_OUT:
1644 	case ISCSI_OP_TEXT:
1645 		spin_lock_bh(&conn->cmd_lock);
1646 		if (!list_empty(&cmd->i_conn_node))
1647 			list_del_init(&cmd->i_conn_node);
1648 		spin_unlock_bh(&conn->cmd_lock);
1649 
1650 		/*
1651 		 * Handle special case for REJECT when iscsi_add_reject*() has
1652 		 * overwritten the original iscsi_opcode assignment, and the
1653 		 * associated cmd->se_cmd needs to be released.
1654 		 */
1655 		if (cmd->se_cmd.se_tfo != NULL) {
1656 			pr_debug("Calling transport_generic_free_cmd from"
1657 				 " isert_put_cmd for 0x%02x\n",
1658 				 cmd->iscsi_opcode);
1659 			transport_generic_free_cmd(&cmd->se_cmd, 0);
1660 			break;
1661 		}
1662 		/*
1663 		 * Fall-through
1664 		 */
1665 	default:
1666 		iscsit_release_cmd(cmd);
1667 		break;
1668 	}
1669 }
1670 
1671 static void
1672 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1673 {
1674 	if (tx_desc->dma_addr != 0) {
1675 		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1676 		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1677 				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
1678 		tx_desc->dma_addr = 0;
1679 	}
1680 }
1681 
1682 static void
1683 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1684 		     struct ib_device *ib_dev, bool comp_err)
1685 {
1686 	if (isert_cmd->pdu_buf_dma != 0) {
1687 		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1688 		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1689 				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1690 		isert_cmd->pdu_buf_dma = 0;
1691 	}
1692 
1693 	isert_unmap_tx_desc(tx_desc, ib_dev);
1694 	isert_put_cmd(isert_cmd, comp_err);
1695 }
1696 
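/*
 * Check the signature MR after a protected transfer and translate any
 * guard/reftag/apptag error into the matching TCM sense code and failing
 * sector.  Returns non-zero when a PI error was detected.
 */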
1697 static int
1698 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1699 {
1700 	struct ib_mr_status mr_status;
1701 	int ret;
1702 
1703 	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1704 	if (ret) {
1705 		pr_err("ib_check_mr_status failed, ret %d\n", ret);
1706 		goto fail_mr_status;
1707 	}
1708 
1709 	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1710 		u64 sec_offset_err;
1711 		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1712 
1713 		switch (mr_status.sig_err.err_type) {
1714 		case IB_SIG_BAD_GUARD:
1715 			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1716 			break;
1717 		case IB_SIG_BAD_REFTAG:
1718 			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1719 			break;
1720 		case IB_SIG_BAD_APPTAG:
1721 			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1722 			break;
1723 		}
1724 		sec_offset_err = mr_status.sig_err.sig_err_offset;
1725 		do_div(sec_offset_err, block_size);
1726 		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1727 
1728 		pr_err("isert: PI error found type %d at sector 0x%llx "
1729 		       "expected 0x%x vs actual 0x%x\n",
1730 		       mr_status.sig_err.err_type,
1731 		       (unsigned long long)se_cmd->bad_sector,
1732 		       mr_status.sig_err.expected,
1733 		       mr_status.sig_err.actual);
1734 		ret = 1;
1735 	}
1736 
1737 fail_mr_status:
1738 	return ret;
1739 }
1740 
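/*
 * RDMA_WRITE completion (data-in for a SCSI READ): verify PI status when
 * the transfer was protected, release the RDMA registration and send the
 * SCSI response, or a check condition on PI failure.
 */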
1741 static void
1742 isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
1743 			    struct isert_cmd *isert_cmd)
1744 {
1745 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1746 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1747 	struct se_cmd *se_cmd = &cmd->se_cmd;
1748 	struct isert_conn *isert_conn = isert_cmd->conn;
1749 	struct isert_device *device = isert_conn->conn_device;
1750 	int ret = 0;
1751 
1752 	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
1753 		ret = isert_check_pi_status(se_cmd,
1754 					    wr->fr_desc->pi_ctx->sig_mr);
1755 		wr->fr_desc->ind &= ~ISERT_PROTECTED;
1756 	}
1757 
1758 	device->unreg_rdma_mem(isert_cmd, isert_conn);
1759 	wr->send_wr_num = 0;
1760 	if (ret)
1761 		transport_send_check_condition_and_sense(se_cmd,
1762 							 se_cmd->pi_err, 0);
1763 	else
1764 		isert_put_response(isert_conn->conn, cmd);
1765 }
1766 
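/*
 * RDMA_READ completion (data-out for a SCSI WRITE): verify PI status when
 * protected, release the registration, mark the data-out received and
 * hand the command to the backend via target_execute_cmd().
 */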
1767 static void
1768 isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1769 			   struct isert_cmd *isert_cmd)
1770 {
1771 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1772 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1773 	struct se_cmd *se_cmd = &cmd->se_cmd;
1774 	struct isert_conn *isert_conn = isert_cmd->conn;
1775 	struct isert_device *device = isert_conn->conn_device;
1776 	int ret = 0;
1777 
1778 	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
1779 		ret = isert_check_pi_status(se_cmd,
1780 					    wr->fr_desc->pi_ctx->sig_mr);
1781 		wr->fr_desc->ind &= ~ISERT_PROTECTED;
1782 	}
1783 
1784 	iscsit_stop_dataout_timer(cmd);
1785 	device->unreg_rdma_mem(isert_cmd, isert_conn);
1786 	cmd->write_data_done = wr->data.len;
1787 	wr->send_wr_num = 0;
1788 
1789 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1790 	spin_lock_bh(&cmd->istate_lock);
1791 	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1792 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1793 	spin_unlock_bh(&cmd->istate_lock);
1794 
1795 	if (ret)
1796 		transport_send_check_condition_and_sense(se_cmd,
1797 							 se_cmd->pi_err, 0);
1798 	else
1799 		target_execute_cmd(se_cmd);
1800 }
1801 
1802 static void
1803 isert_do_control_comp(struct work_struct *work)
1804 {
1805 	struct isert_cmd *isert_cmd = container_of(work,
1806 			struct isert_cmd, comp_work);
1807 	struct isert_conn *isert_conn = isert_cmd->conn;
1808 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1809 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1810 
1811 	switch (cmd->i_state) {
1812 	case ISTATE_SEND_TASKMGTRSP:
1813 		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1814 
1815 		atomic_dec(&isert_conn->post_send_buf_count);
1816 		iscsit_tmr_post_handler(cmd, cmd->conn);
1817 
1818 		cmd->i_state = ISTATE_SENT_STATUS;
1819 		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1820 		break;
1821 	case ISTATE_SEND_REJECT:
1822 		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1823 		atomic_dec(&isert_conn->post_send_buf_count);
1824 
1825 		cmd->i_state = ISTATE_SENT_STATUS;
1826 		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1827 		break;
1828 	case ISTATE_SEND_LOGOUTRSP:
1829 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1830 
1831 		atomic_dec(&isert_conn->post_send_buf_count);
1832 		iscsit_logout_post_handler(cmd, cmd->conn);
1833 		break;
1834 	case ISTATE_SEND_TEXTRSP:
1835 		atomic_dec(&isert_conn->post_send_buf_count);
1836 		cmd->i_state = ISTATE_SENT_STATUS;
1837 		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1838 		break;
1839 	default:
1840 		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
1841 		dump_stack();
1842 		break;
1843 	}
1844 }
1845 
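/*
 * TX completion for a response PDU.  Control-type responses (task
 * management, logout, reject and text) are handed off to isert_comp_wq
 * through isert_do_control_comp(); everything else is completed inline
 * here.
 */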
1846 static void
1847 isert_response_completion(struct iser_tx_desc *tx_desc,
1848 			  struct isert_cmd *isert_cmd,
1849 			  struct isert_conn *isert_conn,
1850 			  struct ib_device *ib_dev)
1851 {
1852 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1853 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1854 
1855 	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1856 	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1857 	    cmd->i_state == ISTATE_SEND_REJECT ||
1858 	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
1859 		isert_unmap_tx_desc(tx_desc, ib_dev);
1860 
1861 		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1862 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
1863 		return;
1864 	}
1865 
1866 	/*
1867 	 * If send_wr_num is 0, the RDMA completion has already
1868 	 * arrived and cleared it, so simply decrement the
1869 	 * response post.  Otherwise the response is still
1870 	 * accounted for in send_wr_num, so subtract the
1871 	 * whole count.
1872 	 */
1873 	if (wr->send_wr_num)
1874 		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1875 	else
1876 		atomic_dec(&isert_conn->post_send_buf_count);
1877 
1878 	cmd->i_state = ISTATE_SENT_STATUS;
1879 	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1880 }
1881 
1882 static void
1883 __isert_send_completion(struct iser_tx_desc *tx_desc,
1884 		        struct isert_conn *isert_conn)
1885 {
1886 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1887 	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1888 	struct isert_rdma_wr *wr;
1889 
1890 	if (!isert_cmd) {
1891 		atomic_dec(&isert_conn->post_send_buf_count);
1892 		isert_unmap_tx_desc(tx_desc, ib_dev);
1893 		return;
1894 	}
1895 	wr = &isert_cmd->rdma_wr;
1896 
1897 	switch (wr->iser_ib_op) {
1898 	case ISER_IB_RECV:
1899 		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1900 		dump_stack();
1901 		break;
1902 	case ISER_IB_SEND:
1903 		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1904 		isert_response_completion(tx_desc, isert_cmd,
1905 					  isert_conn, ib_dev);
1906 		break;
1907 	case ISER_IB_RDMA_WRITE:
1908 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1909 		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1910 		isert_completion_rdma_write(tx_desc, isert_cmd);
1911 		break;
1912 	case ISER_IB_RDMA_READ:
1913 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1914 
1915 		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1916 		isert_completion_rdma_read(tx_desc, isert_cmd);
1917 		break;
1918 	default:
1919 		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
1920 		dump_stack();
1921 		break;
1922 	}
1923 }
1924 
1925 static void
1926 isert_send_completion(struct iser_tx_desc *tx_desc,
1927 		      struct isert_conn *isert_conn)
1928 {
1929 	struct llist_node *llnode = tx_desc->comp_llnode_batch;
1930 	struct iser_tx_desc *t;
1931 	/*
1932 	 * Drain coalesced completion llist starting from comp_llnode_batch
1933 	 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
1934 	 */
1935 	while (llnode) {
1936 		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1937 		llnode = llist_next(llnode);
1938 		__isert_send_completion(t, isert_conn);
1939 	}
1940 	__isert_send_completion(tx_desc, isert_conn);
1941 }
1942 
1943 static void
1944 isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
1945 {
1946 	struct llist_node *llnode;
1947 	struct isert_rdma_wr *wr;
1948 	struct iser_tx_desc *t;
1949 
1950 	mutex_lock(&isert_conn->conn_mutex);
1951 	llnode = llist_del_all(&isert_conn->conn_comp_llist);
1952 	isert_conn->conn_comp_batch = 0;
1953 	mutex_unlock(&isert_conn->conn_mutex);
1954 
1955 	while (llnode) {
1956 		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1957 		llnode = llist_next(llnode);
1958 		wr = &t->isert_cmd->rdma_wr;
1959 
1960 		/*
1961 		 * If send_wr_num is 0, the RDMA completion has already
1962 		 * arrived and cleared it, so simply decrement the
1963 		 * response post.  Otherwise the response is still
1964 		 * accounted for in send_wr_num, so subtract the
1965 		 * whole count.
1966 		 */
1967 		if (wr->send_wr_num)
1968 			atomic_sub(wr->send_wr_num,
1969 				   &isert_conn->post_send_buf_count);
1970 		else
1971 			atomic_dec(&isert_conn->post_send_buf_count);
1972 
1973 		isert_completion_put(t, t->isert_cmd, ib_dev, true);
1974 	}
1975 }
1976 
1977 static void
1978 isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1979 {
1980 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1981 	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1982 	struct llist_node *llnode = tx_desc->comp_llnode_batch;
1983 	struct isert_rdma_wr *wr;
1984 	struct iser_tx_desc *t;
1985 
1986 	while (llnode) {
1987 		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1988 		llnode = llist_next(llnode);
1989 		wr = &t->isert_cmd->rdma_wr;
1990 
1991 		/*
1992 		 * If send_wr_num is 0, the RDMA completion has already
1993 		 * arrived and cleared it, so simply decrement the
1994 		 * response post.  Otherwise the response is still
1995 		 * accounted for in send_wr_num, so subtract the
1996 		 * whole count.
1997 		 */
1998 		if (wr->send_wr_num)
1999 			atomic_sub(wr->send_wr_num,
2000 				   &isert_conn->post_send_buf_count);
2001 		else
2002 			atomic_dec(&isert_conn->post_send_buf_count);
2003 
2004 		isert_completion_put(t, t->isert_cmd, ib_dev, true);
2005 	}
2006 	tx_desc->comp_llnode_batch = NULL;
2007 
2008 	if (!isert_cmd)
2009 		isert_unmap_tx_desc(tx_desc, ib_dev);
2010 	else
2011 		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
2012 }
2013 
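/*
 * RX flush-error path: once all posted receives have drained, flush any
 * coalesced TX completions, wait for outstanding session commands and
 * sends, mark the connection DOWN, kick connection reinstatement and
 * finally complete conn_wait_comp_err for isert_wait_conn().
 */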
2014 static void
2015 isert_cq_rx_comp_err(struct isert_conn *isert_conn)
2016 {
2017 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2018 	struct iscsi_conn *conn = isert_conn->conn;
2019 
2020 	if (isert_conn->post_recv_buf_count)
2021 		return;
2022 
2023 	isert_cq_drain_comp_llist(isert_conn, ib_dev);
2024 
2025 	if (conn->sess) {
2026 		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2027 		target_wait_for_sess_cmds(conn->sess->se_sess);
2028 	}
2029 
2030 	while (atomic_read(&isert_conn->post_send_buf_count))
2031 		msleep(3000);
2032 
2033 	mutex_lock(&isert_conn->conn_mutex);
2034 	isert_conn->state = ISER_CONN_DOWN;
2035 	mutex_unlock(&isert_conn->conn_mutex);
2036 
2037 	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2038 
2039 	complete(&isert_conn->conn_wait_comp_err);
2040 }
2041 
2042 static void
2043 isert_cq_tx_work(struct work_struct *work)
2044 {
2045 	struct isert_cq_desc *cq_desc = container_of(work,
2046 				struct isert_cq_desc, cq_tx_work);
2047 	struct isert_device *device = cq_desc->device;
2048 	int cq_index = cq_desc->cq_index;
2049 	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
2050 	struct isert_conn *isert_conn;
2051 	struct iser_tx_desc *tx_desc;
2052 	struct ib_wc wc;
2053 
2054 	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
2055 		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
2056 		isert_conn = wc.qp->qp_context;
2057 
2058 		if (wc.status == IB_WC_SUCCESS) {
2059 			isert_send_completion(tx_desc, isert_conn);
2060 		} else {
2061 			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
2062 			pr_debug("TX wc.status: 0x%08x\n", wc.status);
2063 			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
2064 
2065 			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
2066 				if (tx_desc->llnode_active)
2067 					continue;
2068 
2069 				atomic_dec(&isert_conn->post_send_buf_count);
2070 				isert_cq_tx_comp_err(tx_desc, isert_conn);
2071 			}
2072 		}
2073 	}
2074 
2075 	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
2076 }
2077 
2078 static void
2079 isert_cq_tx_callback(struct ib_cq *cq, void *context)
2080 {
2081 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
2082 
2083 	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
2084 }
2085 
2086 static void
2087 isert_cq_rx_work(struct work_struct *work)
2088 {
2089 	struct isert_cq_desc *cq_desc = container_of(work,
2090 			struct isert_cq_desc, cq_rx_work);
2091 	struct isert_device *device = cq_desc->device;
2092 	int cq_index = cq_desc->cq_index;
2093 	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
2094 	struct isert_conn *isert_conn;
2095 	struct iser_rx_desc *rx_desc;
2096 	struct ib_wc wc;
2097 	unsigned long xfer_len;
2098 
2099 	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
2100 		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
2101 		isert_conn = wc.qp->qp_context;
2102 
2103 		if (wc.status == IB_WC_SUCCESS) {
2104 			xfer_len = (unsigned long)wc.byte_len;
2105 			isert_rx_completion(rx_desc, isert_conn, xfer_len);
2106 		} else {
2107 			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
2108 			if (wc.status != IB_WC_WR_FLUSH_ERR) {
2109 				pr_debug("RX wc.status: 0x%08x\n", wc.status);
2110 				pr_debug("RX wc.vendor_err: 0x%08x\n",
2111 					 wc.vendor_err);
2112 			}
2113 			isert_conn->post_recv_buf_count--;
2114 			isert_cq_rx_comp_err(isert_conn);
2115 		}
2116 	}
2117 
2118 	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
2119 }
2120 
2121 static void
2122 isert_cq_rx_callback(struct ib_cq *cq, void *context)
2123 {
2124 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
2125 
2126 	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
2127 }
2128 
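/*
 * Post the prepared response send_wr and account for it in
 * post_send_buf_count; the count is rolled back if ib_post_send() fails.
 */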
2129 static int
2130 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2131 {
2132 	struct ib_send_wr *wr_failed;
2133 	int ret;
2134 
2135 	atomic_inc(&isert_conn->post_send_buf_count);
2136 
2137 	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
2138 			   &wr_failed);
2139 	if (ret) {
2140 		pr_err("ib_post_send failed with %d\n", ret);
2141 		atomic_dec(&isert_conn->post_send_buf_count);
2142 		return ret;
2143 	}
2144 	return ret;
2145 }
2146 
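/*
 * Queue a SCSI response PDU, attaching any sense data as a second SGE
 * DMA-mapped from cmd->sense_buffer before the send is posted.
 */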
2147 static int
2148 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2149 {
2150 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2151 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2152 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2153 	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
2154 				&isert_cmd->tx_desc.iscsi_header;
2155 
2156 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2157 	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
2158 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2159 	/*
2160 	 * Attach SENSE DATA payload to iSCSI Response PDU
2161 	 */
2162 	if (cmd->se_cmd.sense_buffer &&
2163 	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
2164 	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
2165 		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2166 		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2167 		u32 padding, pdu_len;
2168 
2169 		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
2170 				   cmd->sense_buffer);
2171 		cmd->se_cmd.scsi_sense_length += sizeof(__be16);
2172 
2173 		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
2174 		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
2175 		pdu_len = cmd->se_cmd.scsi_sense_length + padding;
2176 
2177 		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2178 				(void *)cmd->sense_buffer, pdu_len,
2179 				DMA_TO_DEVICE);
2180 
2181 		isert_cmd->pdu_buf_len = pdu_len;
2182 		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
2183 		tx_dsg->length	= pdu_len;
2184 		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
2185 		isert_cmd->tx_desc.num_sge = 2;
2186 	}
2187 
2188 	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2189 
2190 	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2191 
2192 	return isert_post_response(isert_conn, isert_cmd);
2193 }
2194 
2195 static void
2196 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2197 {
2198 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2199 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2200 	struct isert_device *device = isert_conn->conn_device;
2201 
2202 	spin_lock_bh(&conn->cmd_lock);
2203 	if (!list_empty(&cmd->i_conn_node))
2204 		list_del_init(&cmd->i_conn_node);
2205 	spin_unlock_bh(&conn->cmd_lock);
2206 
2207 	if (cmd->data_direction == DMA_TO_DEVICE)
2208 		iscsit_stop_dataout_timer(cmd);
2209 
2210 	device->unreg_rdma_mem(isert_cmd, isert_conn);
2211 }
2212 
2213 static enum target_prot_op
2214 isert_get_sup_prot_ops(struct iscsi_conn *conn)
2215 {
2216 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2217 	struct isert_device *device = isert_conn->conn_device;
2218 
2219 	if (device->pi_capable)
2220 		return TARGET_PROT_ALL;
2221 
2222 	return TARGET_PROT_NORMAL;
2223 }
2224 
2225 static int
2226 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2227 		bool nopout_response)
2228 {
2229 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2230 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2231 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2232 
2233 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2234 	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2235 			       &isert_cmd->tx_desc.iscsi_header,
2236 			       nopout_response);
2237 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2238 	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2239 
2240 	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2241 
2242 	return isert_post_response(isert_conn, isert_cmd);
2243 }
2244 
2245 static int
2246 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2247 {
2248 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2249 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2250 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2251 
2252 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2253 	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2254 				&isert_cmd->tx_desc.iscsi_header);
2255 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2256 	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2257 
2258 	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2259 
2260 	return isert_post_response(isert_conn, isert_cmd);
2261 }
2262 
2263 static int
2264 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2265 {
2266 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2267 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2268 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2269 
2270 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2271 	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2272 				  &isert_cmd->tx_desc.iscsi_header);
2273 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2274 	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2275 
2276 	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2277 
2278 	return isert_post_response(isert_conn, isert_cmd);
2279 }
2280 
2281 static int
2282 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2283 {
2284 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2285 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2286 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2287 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2288 	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2289 	struct iscsi_reject *hdr =
2290 		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
2291 
2292 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2293 	iscsit_build_reject(cmd, conn, hdr);
2294 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2295 
2296 	hton24(hdr->dlength, ISCSI_HDR_LEN);
2297 	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2298 			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2299 			DMA_TO_DEVICE);
2300 	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2301 	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
2302 	tx_dsg->length	= ISCSI_HDR_LEN;
2303 	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
2304 	isert_cmd->tx_desc.num_sge = 2;
2305 
2306 	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2307 
2308 	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2309 
2310 	return isert_post_response(isert_conn, isert_cmd);
2311 }
2312 
2313 static int
2314 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2315 {
2316 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2317 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2318 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2319 	struct iscsi_text_rsp *hdr =
2320 		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2321 	u32 txt_rsp_len;
2322 	int rc;
2323 
2324 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2325 	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
2326 	if (rc < 0)
2327 		return rc;
2328 
2329 	txt_rsp_len = rc;
2330 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2331 
2332 	if (txt_rsp_len) {
2333 		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2334 		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2335 		void *txt_rsp_buf = cmd->buf_ptr;
2336 
2337 		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2338 				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2339 
2340 		isert_cmd->pdu_buf_len = txt_rsp_len;
2341 		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
2342 		tx_dsg->length	= txt_rsp_len;
2343 		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
2344 		isert_cmd->tx_desc.num_sge = 2;
2345 	}
2346 	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2347 
2348 	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2349 
2350 	return isert_post_response(isert_conn, isert_cmd);
2351 }
2352 
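/*
 * Fill one RDMA work request with ib_sge entries built from the
 * command's TCM scatterlist, starting at the given byte offset and
 * covering at most data_left bytes.  Returns the number of SGEs used.
 */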
2353 static int
2354 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2355 		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
2356 		    u32 data_left, u32 offset)
2357 {
2358 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2359 	struct scatterlist *sg_start, *tmp_sg;
2360 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2361 	u32 sg_off, page_off;
2362 	int i = 0, sg_nents;
2363 
2364 	sg_off = offset / PAGE_SIZE;
2365 	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2366 	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2367 	page_off = offset % PAGE_SIZE;
2368 
2369 	send_wr->sg_list = ib_sge;
2370 	send_wr->num_sge = sg_nents;
2371 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2372 	/*
2373 	 * Map each TCM scatterlist entry into an ib_sge dma_addr/length pair.
2374 	 */
2375 	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2376 		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
2377 			 (unsigned long long)tmp_sg->dma_address,
2378 			 tmp_sg->length, page_off);
2379 
2380 		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2381 		ib_sge->length = min_t(u32, data_left,
2382 				ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2383 		ib_sge->lkey = isert_conn->conn_mr->lkey;
2384 
2385 		pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2386 			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
2387 		page_off = 0;
2388 		data_left -= ib_sge->length;
2389 		ib_sge++;
2390 		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
2391 	}
2392 
2393 	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2394 		 send_wr->sg_list, send_wr->num_sge);
2395 
2396 	return sg_nents;
2397 }
2398 
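/*
 * Non-fastreg RDMA setup: DMA-map the command's data scatterlist using
 * the connection's conn_mr lkey, allocate one ib_sge per SG entry and
 * one send_wr per max_sge-sized chunk, then chain the work requests.
 * For RDMA_WRITE the last work request links into the response
 * send_wr; for RDMA_READ the last one is posted signaled.
 */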
2399 static int
2400 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2401 	       struct isert_rdma_wr *wr)
2402 {
2403 	struct se_cmd *se_cmd = &cmd->se_cmd;
2404 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2405 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2406 	struct isert_data_buf *data = &wr->data;
2407 	struct ib_send_wr *send_wr;
2408 	struct ib_sge *ib_sge;
2409 	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2410 	int ret = 0, i, ib_sge_cnt;
2411 
2412 	isert_cmd->tx_desc.isert_cmd = isert_cmd;
2413 
2414 	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2415 	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2416 				 se_cmd->t_data_nents, se_cmd->data_length,
2417 				 offset, wr->iser_ib_op, &wr->data);
2418 	if (ret)
2419 		return ret;
2420 
2421 	data_left = data->len;
2422 	offset = data->offset;
2423 
2424 	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
2425 	if (!ib_sge) {
2426 		pr_warn("Unable to allocate ib_sge\n");
2427 		ret = -ENOMEM;
2428 		goto unmap_cmd;
2429 	}
2430 	wr->ib_sge = ib_sge;
2431 
2432 	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
2433 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2434 				GFP_KERNEL);
2435 	if (!wr->send_wr) {
2436 		pr_debug("Unable to allocate wr->send_wr\n");
2437 		ret = -ENOMEM;
2438 		goto unmap_cmd;
2439 	}
2440 
2441 	wr->isert_cmd = isert_cmd;
2442 	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2443 
2444 	for (i = 0; i < wr->send_wr_num; i++) {
2445 		send_wr = &isert_cmd->rdma_wr.send_wr[i];
2446 		data_len = min(data_left, rdma_write_max);
2447 
2448 		send_wr->send_flags = 0;
2449 		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2450 			send_wr->opcode = IB_WR_RDMA_WRITE;
2451 			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2452 			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2453 			if (i + 1 == wr->send_wr_num)
2454 				send_wr->next = &isert_cmd->tx_desc.send_wr;
2455 			else
2456 				send_wr->next = &wr->send_wr[i + 1];
2457 		} else {
2458 			send_wr->opcode = IB_WR_RDMA_READ;
2459 			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2460 			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2461 			if (i + 1 == wr->send_wr_num)
2462 				send_wr->send_flags = IB_SEND_SIGNALED;
2463 			else
2464 				send_wr->next = &wr->send_wr[i + 1];
2465 		}
2466 
2467 		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2468 					send_wr, data_len, offset);
2469 		ib_sge += ib_sge_cnt;
2470 
2471 		offset += data_len;
2472 		va_offset += data_len;
2473 		data_left -= data_len;
2474 	}
2475 
2476 	return 0;
2477 unmap_cmd:
2478 	isert_unmap_data_buf(isert_conn, data);
2479 
2480 	return ret;
2481 }
2482 
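/*
 * Build a fast-registration page list from the DMA-mapped scatterlist,
 * collapsing physically contiguous entries into page-aligned chunks.
 * Returns the number of pages written into fr_pl.
 */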
2483 static int
2484 isert_map_fr_pagelist(struct ib_device *ib_dev,
2485 		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2486 {
2487 	u64 start_addr, end_addr, page, chunk_start = 0;
2488 	struct scatterlist *tmp_sg;
2489 	int i = 0, new_chunk, last_ent, n_pages;
2490 
2491 	n_pages = 0;
2492 	new_chunk = 1;
2493 	last_ent = sg_nents - 1;
2494 	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2495 		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2496 		if (new_chunk)
2497 			chunk_start = start_addr;
2498 		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2499 
2500 		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2501 			 i, (unsigned long long)tmp_sg->dma_address,
2502 			 tmp_sg->length);
2503 
2504 		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2505 			new_chunk = 0;
2506 			continue;
2507 		}
2508 		new_chunk = 1;
2509 
2510 		page = chunk_start & PAGE_MASK;
2511 		do {
2512 			fr_pl[n_pages++] = page;
2513 			pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2514 				 n_pages - 1, page);
2515 			page += PAGE_SIZE;
2516 		} while (page < end_addr);
2517 	}
2518 
2519 	return n_pages;
2520 }
2521 
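/*
 * Register a data or protection buffer through a fast-registration MR.
 * Single-SGE buffers are described directly with the connection's
 * conn_mr lkey; otherwise an IB_WR_LOCAL_INV plus key bump is prepended
 * when the descriptor's key is no longer marked valid, the FAST_REG_MR
 * work request is posted, and the resulting lkey/addr/length is
 * returned in *sge.
 */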
2522 static int
2523 isert_fast_reg_mr(struct isert_conn *isert_conn,
2524 		  struct fast_reg_descriptor *fr_desc,
2525 		  struct isert_data_buf *mem,
2526 		  enum isert_indicator ind,
2527 		  struct ib_sge *sge)
2528 {
2529 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2530 	struct ib_mr *mr;
2531 	struct ib_fast_reg_page_list *frpl;
2532 	struct ib_send_wr fr_wr, inv_wr;
2533 	struct ib_send_wr *bad_wr, *wr = NULL;
2534 	int ret, pagelist_len;
2535 	u32 page_off;
2536 	u8 key;
2537 
2538 	if (mem->dma_nents == 1) {
2539 		sge->lkey = isert_conn->conn_mr->lkey;
2540 		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2541 		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
2542 		pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
2543 			 __func__, __LINE__, sge->addr, sge->length,
2544 			 sge->lkey);
2545 		return 0;
2546 	}
2547 
2548 	if (ind == ISERT_DATA_KEY_VALID) {
2549 		/* Registering data buffer */
2550 		mr = fr_desc->data_mr;
2551 		frpl = fr_desc->data_frpl;
2552 	} else {
2553 		/* Registering protection buffer */
2554 		mr = fr_desc->pi_ctx->prot_mr;
2555 		frpl = fr_desc->pi_ctx->prot_frpl;
2556 	}
2557 
2558 	page_off = mem->offset % PAGE_SIZE;
2559 
2560 	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
2561 		 fr_desc, mem->nents, mem->offset);
2562 
2563 	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
2564 					     &frpl->page_list[0]);
2565 
2566 	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
2567 		memset(&inv_wr, 0, sizeof(inv_wr));
2568 		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2569 		inv_wr.opcode = IB_WR_LOCAL_INV;
2570 		inv_wr.ex.invalidate_rkey = mr->rkey;
2571 		wr = &inv_wr;
2572 		/* Bump the key */
2573 		key = (u8)(mr->rkey & 0x000000FF);
2574 		ib_update_fast_reg_key(mr, ++key);
2575 	}
2576 
2577 	/* Prepare FASTREG WR */
2578 	memset(&fr_wr, 0, sizeof(fr_wr));
2579 	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
2580 	fr_wr.opcode = IB_WR_FAST_REG_MR;
2581 	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
2582 	fr_wr.wr.fast_reg.page_list = frpl;
2583 	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2584 	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2585 	fr_wr.wr.fast_reg.length = mem->len;
2586 	fr_wr.wr.fast_reg.rkey = mr->rkey;
2587 	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2588 
2589 	if (!wr)
2590 		wr = &fr_wr;
2591 	else
2592 		wr->next = &fr_wr;
2593 
2594 	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2595 	if (ret) {
2596 		pr_err("fast registration failed, ret:%d\n", ret);
2597 		return ret;
2598 	}
2599 	fr_desc->ind &= ~ind;
2600 
2601 	sge->lkey = mr->lkey;
2602 	sge->addr = frpl->page_list[0] + page_off;
2603 	sge->length = mem->len;
2604 
2605 	pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
2606 		 __func__, __LINE__, sge->addr, sge->length,
2607 		 sge->lkey);
2608 
2609 	return ret;
2610 }
2611 
2612 static inline void
2613 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2614 		     struct ib_sig_domain *domain)
2615 {
2616 	domain->sig_type = IB_SIG_TYPE_T10_DIF;
2617 	domain->sig.dif.bg_type = IB_T10DIF_CRC;
2618 	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2619 	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
2620 	/*
2621 	 * These values are hard-coded for now; if the target core
2622 	 * starts exposing them in the future, take them from se_cmd
2623 	 * instead.
2624 	 */
2625 	domain->sig.dif.apptag_check_mask = 0xffff;
2626 	domain->sig.dif.app_escape = true;
2627 	domain->sig.dif.ref_escape = true;
2628 	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2629 	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2630 		domain->sig.dif.ref_remap = true;
2631 }
2632 
2633 static int
2634 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2635 {
2636 	switch (se_cmd->prot_op) {
2637 	case TARGET_PROT_DIN_INSERT:
2638 	case TARGET_PROT_DOUT_STRIP:
2639 		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2640 		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2641 		break;
2642 	case TARGET_PROT_DOUT_INSERT:
2643 	case TARGET_PROT_DIN_STRIP:
2644 		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2645 		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2646 		break;
2647 	case TARGET_PROT_DIN_PASS:
2648 	case TARGET_PROT_DOUT_PASS:
2649 		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2650 		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2651 		break;
2652 	default:
2653 		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2654 		return -EINVAL;
2655 	}
2656 
2657 	return 0;
2658 }
2659 
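/*
 * Translate the target-core DIF check flags into the ib_sig_attrs
 * check_mask bit groups; note that both the 0x30 and 0x0f groups are
 * currently keyed off TARGET_DIF_CHECK_REFTAG.
 */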
2660 static inline u8
2661 isert_set_prot_checks(u8 prot_checks)
2662 {
2663 	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
2664 	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2665 	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2666 }
2667 
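/*
 * Register the signature MR combining the data SGE (and the protection
 * SGE when a protection scatterlist exists) with the T10-PI attributes
 * derived from se_cmd, and return a signature SGE covering the data
 * plus any wire-side protection bytes.
 */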
2668 static int
2669 isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2670 		 struct fast_reg_descriptor *fr_desc,
2671 		 struct ib_sge *data_sge, struct ib_sge *prot_sge,
2672 		 struct ib_sge *sig_sge)
2673 {
2674 	struct ib_send_wr sig_wr, inv_wr;
2675 	struct ib_send_wr *bad_wr, *wr = NULL;
2676 	struct pi_context *pi_ctx = fr_desc->pi_ctx;
2677 	struct ib_sig_attrs sig_attrs;
2678 	int ret;
2679 	u32 key;
2680 
2681 	memset(&sig_attrs, 0, sizeof(sig_attrs));
2682 	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2683 	if (ret)
2684 		goto err;
2685 
2686 	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
2687 
2688 	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
2689 		memset(&inv_wr, 0, sizeof(inv_wr));
2690 		inv_wr.opcode = IB_WR_LOCAL_INV;
2691 		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2692 		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
2693 		wr = &inv_wr;
2694 		/* Bump the key */
2695 		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
2696 		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
2697 	}
2698 
2699 	memset(&sig_wr, 0, sizeof(sig_wr));
2700 	sig_wr.opcode = IB_WR_REG_SIG_MR;
2701 	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
2702 	sig_wr.sg_list = data_sge;
2703 	sig_wr.num_sge = 1;
2704 	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2705 	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2706 	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2707 	if (se_cmd->t_prot_sg)
2708 		sig_wr.wr.sig_handover.prot = prot_sge;
2709 
2710 	if (!wr)
2711 		wr = &sig_wr;
2712 	else
2713 		wr->next = &sig_wr;
2714 
2715 	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2716 	if (ret) {
2717 		pr_err("fast registration failed, ret:%d\n", ret);
2718 		goto err;
2719 	}
2720 	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2721 
2722 	sig_sge->lkey = pi_ctx->sig_mr->lkey;
2723 	sig_sge->addr = 0;
2724 	sig_sge->length = se_cmd->data_length;
2725 	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2726 	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2727 		/*
2728 		 * Protection guards are present on the wire, so the
2729 		 * transfer length must be enlarged accordingly.
2730 		 */
2731 		sig_sge->length += se_cmd->prot_length;
2732 
2733 	pr_debug("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
2734 		 sig_sge->addr, sig_sge->length,
2735 		 sig_sge->lkey);
2736 err:
2737 	return ret;
2738 }
2739 
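/*
 * Fast-registration variant of reg_rdma_mem: DMA-map the data buffer,
 * take a fast_reg_descriptor from the connection pool when more than
 * one DMA entry or T10-PI is involved, register the data (and
 * optionally protection plus signature) MRs, and build a single RDMA
 * work request that references the registered region.
 */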
2740 static int
2741 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2742 	       struct isert_rdma_wr *wr)
2743 {
2744 	struct se_cmd *se_cmd = &cmd->se_cmd;
2745 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2746 	struct isert_conn *isert_conn = conn->context;
2747 	struct ib_sge data_sge;
2748 	struct ib_send_wr *send_wr;
2749 	struct fast_reg_descriptor *fr_desc = NULL;
2750 	u32 offset;
2751 	int ret = 0;
2752 	unsigned long flags;
2753 
2754 	isert_cmd->tx_desc.isert_cmd = isert_cmd;
2755 
2756 	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2757 	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2758 				 se_cmd->t_data_nents, se_cmd->data_length,
2759 				 offset, wr->iser_ib_op, &wr->data);
2760 	if (ret)
2761 		return ret;
2762 
2763 	if (wr->data.dma_nents != 1 ||
2764 	    se_cmd->prot_op != TARGET_PROT_NORMAL) {
2765 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
2766 		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2767 					   struct fast_reg_descriptor, list);
2768 		list_del(&fr_desc->list);
2769 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2770 		wr->fr_desc = fr_desc;
2771 	}
2772 
2773 	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
2774 				ISERT_DATA_KEY_VALID, &data_sge);
2775 	if (ret)
2776 		goto unmap_cmd;
2777 
2778 	if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
2779 		struct ib_sge prot_sge, sig_sge;
2780 
2781 		if (se_cmd->t_prot_sg) {
2782 			ret = isert_map_data_buf(isert_conn, isert_cmd,
2783 						 se_cmd->t_prot_sg,
2784 						 se_cmd->t_prot_nents,
2785 						 se_cmd->prot_length,
2786 						 0, wr->iser_ib_op, &wr->prot);
2787 			if (ret)
2788 				goto unmap_cmd;
2789 
2790 			ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
2791 						ISERT_PROT_KEY_VALID, &prot_sge);
2792 			if (ret)
2793 				goto unmap_prot_cmd;
2794 		}
2795 
2796 		ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
2797 				       &data_sge, &prot_sge, &sig_sge);
2798 		if (ret)
2799 			goto unmap_prot_cmd;
2800 
2801 		fr_desc->ind |= ISERT_PROTECTED;
2802 		memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
2803 	} else
2804 		memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
2805 
2806 	wr->ib_sge = &wr->s_ib_sge;
2807 	wr->send_wr_num = 1;
2808 	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2809 	wr->send_wr = &wr->s_send_wr;
2810 	wr->isert_cmd = isert_cmd;
2811 
2812 	send_wr = &isert_cmd->rdma_wr.s_send_wr;
2813 	send_wr->sg_list = &wr->s_ib_sge;
2814 	send_wr->num_sge = 1;
2815 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2816 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2817 		send_wr->opcode = IB_WR_RDMA_WRITE;
2818 		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2819 		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2820 		send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
2821 				      0 : IB_SEND_SIGNALED;
2822 	} else {
2823 		send_wr->opcode = IB_WR_RDMA_READ;
2824 		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2825 		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2826 		send_wr->send_flags = IB_SEND_SIGNALED;
2827 	}
2828 
2829 	return 0;
2830 unmap_prot_cmd:
2831 	if (se_cmd->t_prot_sg)
2832 		isert_unmap_data_buf(isert_conn, &wr->prot);
2833 unmap_cmd:
2834 	if (fr_desc) {
2835 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
2836 		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2837 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2838 	}
2839 	isert_unmap_data_buf(isert_conn, &wr->data);
2840 
2841 	return ret;
2842 }
2843 
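/*
 * queue_data_in callback: post the RDMA_WRITE chain that pushes Data-In
 * to the initiator.  For commands without T10-PI the SCSI response
 * send_wr is chained behind the last RDMA_WRITE so both go out in the
 * same posting.
 */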
2844 static int
2845 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2846 {
2847 	struct se_cmd *se_cmd = &cmd->se_cmd;
2848 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2849 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2850 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2851 	struct isert_device *device = isert_conn->conn_device;
2852 	struct ib_send_wr *wr_failed;
2853 	int rc;
2854 
2855 	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
2856 		 isert_cmd, se_cmd->data_length);
2857 	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2858 	rc = device->reg_rdma_mem(conn, cmd, wr);
2859 	if (rc) {
2860 		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2861 		return rc;
2862 	}
2863 
2864 	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
2865 		/*
2866 		 * Build isert_cmd->tx_desc for the iSCSI response PDU and attach it to the chain.
2867 		 */
2868 		isert_create_send_desc(isert_conn, isert_cmd,
2869 				       &isert_cmd->tx_desc);
2870 		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2871 				     &isert_cmd->tx_desc.iscsi_header);
2872 		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2873 		isert_init_send_wr(isert_conn, isert_cmd,
2874 				   &isert_cmd->tx_desc.send_wr, false);
2875 		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2876 		wr->send_wr_num += 1;
2877 	}
2878 
2879 	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
2880 
2881 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2882 	if (rc) {
2883 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2884 		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
2885 	}
2886 
2887 	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
2888 		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
2889 			 "READ\n", isert_cmd);
2890 	else
2891 		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
2892 			 isert_cmd);
2893 
2894 	return 1;
2895 }
2896 
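/*
 * get_dataout callback: post the RDMA_READ work requests that pull the
 * remaining Data-Out payload from the initiator; the completion path is
 * isert_completion_rdma_read().
 */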
2897 static int
2898 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2899 {
2900 	struct se_cmd *se_cmd = &cmd->se_cmd;
2901 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2902 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2903 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2904 	struct isert_device *device = isert_conn->conn_device;
2905 	struct ib_send_wr *wr_failed;
2906 	int rc;
2907 
2908 	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2909 		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
2910 	wr->iser_ib_op = ISER_IB_RDMA_READ;
2911 	rc = device->reg_rdma_mem(conn, cmd, wr);
2912 	if (rc) {
2913 		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2914 		return rc;
2915 	}
2916 
2917 	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
2918 
2919 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2920 	if (rc) {
2921 		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2922 		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
2923 	}
2924 	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2925 		 isert_cmd);
2926 
2927 	return 0;
2928 }
2929 
2930 static int
2931 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2932 {
2933 	int ret;
2934 
2935 	switch (state) {
2936 	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2937 		ret = isert_put_nopin(cmd, conn, false);
2938 		break;
2939 	default:
2940 		pr_err("Unknown immediate state: 0x%02x\n", state);
2941 		ret = -EINVAL;
2942 		break;
2943 	}
2944 
2945 	return ret;
2946 }
2947 
2948 static int
2949 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2950 {
2951 	int ret;
2952 
2953 	switch (state) {
2954 	case ISTATE_SEND_LOGOUTRSP:
2955 		ret = isert_put_logout_rsp(cmd, conn);
2956 		if (!ret) {
2957 			pr_debug("Returning iSER Logout -EAGAIN\n");
2958 			ret = -EAGAIN;
2959 		}
2960 		break;
2961 	case ISTATE_SEND_NOPIN:
2962 		ret = isert_put_nopin(cmd, conn, true);
2963 		break;
2964 	case ISTATE_SEND_TASKMGTRSP:
2965 		ret = isert_put_tm_rsp(cmd, conn);
2966 		break;
2967 	case ISTATE_SEND_REJECT:
2968 		ret = isert_put_reject(cmd, conn);
2969 		break;
2970 	case ISTATE_SEND_TEXTRSP:
2971 		ret = isert_put_text_rsp(cmd, conn);
2972 		break;
2973 	case ISTATE_SEND_STATUS:
2974 		/*
2975 		 * Special case for sending a non-GOOD SCSI status from the
2976 		 * TX thread context during a pre-execution se_cmd failure.
2977 		 */
2978 		ret = isert_put_response(conn, cmd);
2979 		break;
2980 	default:
2981 		pr_err("Unknown response state: 0x%02x\n", state);
2982 		ret = -EINVAL;
2983 		break;
2984 	}
2985 
2986 	return ret;
2987 }
2988 
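/*
 * Network portal setup: allocate the isert_np, create an RDMA CM
 * listener id, bind it to the portal address and start listening with a
 * backlog of ISERT_RDMA_LISTEN_BACKLOG pending connects.
 */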
2989 static int
2990 isert_setup_np(struct iscsi_np *np,
2991 	       struct __kernel_sockaddr_storage *ksockaddr)
2992 {
2993 	struct isert_np *isert_np;
2994 	struct rdma_cm_id *isert_lid;
2995 	struct sockaddr *sa;
2996 	int ret;
2997 
2998 	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2999 	if (!isert_np) {
3000 		pr_err("Unable to allocate struct isert_np\n");
3001 		return -ENOMEM;
3002 	}
3003 	sema_init(&isert_np->np_sem, 0);
3004 	mutex_init(&isert_np->np_accept_mutex);
3005 	INIT_LIST_HEAD(&isert_np->np_accept_list);
3006 	init_completion(&isert_np->np_login_comp);
3007 
3008 	sa = (struct sockaddr *)ksockaddr;
3009 	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
3010 	/*
3011 	 * Set up np->np_sockaddr from the sockaddr passed in from the
3012 	 * iscsi_target_configfs.c code.
3013 	 */
3014 	memcpy(&np->np_sockaddr, ksockaddr,
3015 	       sizeof(struct __kernel_sockaddr_storage));
3016 
3017 	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
3018 				IB_QPT_RC);
3019 	if (IS_ERR(isert_lid)) {
3020 		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
3021 		       PTR_ERR(isert_lid));
3022 		ret = PTR_ERR(isert_lid);
3023 		goto out;
3024 	}
3025 
3026 	ret = rdma_bind_addr(isert_lid, sa);
3027 	if (ret) {
3028 		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
3029 		goto out_lid;
3030 	}
3031 
3032 	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
3033 	if (ret) {
3034 		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
3035 		goto out_lid;
3036 	}
3037 
3038 	isert_np->np_cm_id = isert_lid;
3039 	np->np_context = isert_np;
3040 	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
3041 
3042 	return 0;
3043 
3044 out_lid:
3045 	rdma_destroy_id(isert_lid);
3046 out:
3047 	kfree(isert_np);
3048 	return ret;
3049 }
3050 
3051 static int
3052 isert_rdma_accept(struct isert_conn *isert_conn)
3053 {
3054 	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3055 	struct rdma_conn_param cp;
3056 	int ret;
3057 
3058 	memset(&cp, 0, sizeof(struct rdma_conn_param));
3059 	cp.initiator_depth = isert_conn->initiator_depth;
3060 	cp.retry_count = 7;
3061 	cp.rnr_retry_count = 7;
3062 
3063 	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
3064 
3065 	ret = rdma_accept(cm_id, &cp);
3066 	if (ret) {
3067 		pr_err("rdma_accept() failed with: %d\n", ret);
3068 		return ret;
3069 	}
3070 
3071 	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
3072 
3073 	return 0;
3074 }
3075 
3076 static int
3077 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3078 {
3079 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
3080 	int ret;
3081 
3082 	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
3083 	/*
3084 	 * For login requests after the first PDU, isert_rx_login_req() will
3085 	 * kick schedule_delayed_work(&conn->login_work) as the packet is
3086 	 * received, which turns this callback from iscsi_target_do_login_rx()
3087 	 * into a NOP.
3088 	 */
3089 	if (!login->first_request)
3090 		return 0;
3091 
3092 	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
3093 	if (ret)
3094 		return ret;
3095 
3096 	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
3097 	return 0;
3098 }
3099 
3100 static void
3101 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3102 		    struct isert_conn *isert_conn)
3103 {
3104 	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3105 	struct rdma_route *cm_route = &cm_id->route;
3106 	struct sockaddr_in *sock_in;
3107 	struct sockaddr_in6 *sock_in6;
3108 
3109 	conn->login_family = np->np_sockaddr.ss_family;
3110 
3111 	if (np->np_sockaddr.ss_family == AF_INET6) {
3112 		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3113 		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3114 			 &sock_in6->sin6_addr.in6_u);
3115 		conn->login_port = ntohs(sock_in6->sin6_port);
3116 
3117 		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3118 		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3119 			 &sock_in6->sin6_addr.in6_u);
3120 		conn->local_port = ntohs(sock_in6->sin6_port);
3121 	} else {
3122 		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3123 		sprintf(conn->login_ip, "%pI4",
3124 			&sock_in->sin_addr.s_addr);
3125 		conn->login_port = ntohs(sock_in->sin_port);
3126 
3127 		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3128 		sprintf(conn->local_ip, "%pI4",
3129 			&sock_in->sin_addr.s_addr);
3130 		conn->local_port = ntohs(sock_in->sin_port);
3131 	}
3132 }
3133 
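/*
 * accept_np callback: wait on np_sem for a pending RDMA connect,
 * dequeue it from np_accept_list, post the initial login receive and
 * complete the rdma_accept() handshake before filling in the
 * connection's address information.
 */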
3134 static int
3135 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3136 {
3137 	struct isert_np *isert_np = (struct isert_np *)np->np_context;
3138 	struct isert_conn *isert_conn;
3139 	int max_accept = 0, ret;
3140 
3141 accept_wait:
3142 	ret = down_interruptible(&isert_np->np_sem);
3143 	if (ret || max_accept > 5)
3144 		return -ENODEV;
3145 
3146 	spin_lock_bh(&np->np_thread_lock);
3147 	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
3148 		spin_unlock_bh(&np->np_thread_lock);
3149 		pr_debug("np_thread_state %d for isert_accept_np\n",
3150 			 np->np_thread_state);
3151 		/*
3152 		 * No point in stalling here when np_thread
3153 		 * is in state RESET/SHUTDOWN/EXIT - bail
3154 		 */
3155 		return -ENODEV;
3156 	}
3157 	spin_unlock_bh(&np->np_thread_lock);
3158 
3159 	mutex_lock(&isert_np->np_accept_mutex);
3160 	if (list_empty(&isert_np->np_accept_list)) {
3161 		mutex_unlock(&isert_np->np_accept_mutex);
3162 		max_accept++;
3163 		goto accept_wait;
3164 	}
3165 	isert_conn = list_first_entry(&isert_np->np_accept_list,
3166 			struct isert_conn, conn_accept_node);
3167 	list_del_init(&isert_conn->conn_accept_node);
3168 	mutex_unlock(&isert_np->np_accept_mutex);
3169 
3170 	conn->context = isert_conn;
3171 	isert_conn->conn = conn;
3172 	max_accept = 0;
3173 
3174 	ret = isert_rdma_post_recvl(isert_conn);
3175 	if (ret)
3176 		return ret;
3177 
3178 	ret = isert_rdma_accept(isert_conn);
3179 	if (ret)
3180 		return ret;
3181 
3182 	isert_set_conn_info(np, conn, isert_conn);
3183 
3184 	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
3185 	return 0;
3186 }
3187 
3188 static void
3189 isert_free_np(struct iscsi_np *np)
3190 {
3191 	struct isert_np *isert_np = (struct isert_np *)np->np_context;
3192 
3193 	rdma_destroy_id(isert_np->np_cm_id);
3194 
3195 	np->np_context = NULL;
3196 	kfree(isert_np);
3197 }
3198 
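/*
 * Called by the iSCSI target core during connection teardown: issue
 * rdma_disconnect() if needed, then wait for the completion-error and
 * conn_wait completions before dropping this path's connection
 * reference.
 */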
3199 static void isert_wait_conn(struct iscsi_conn *conn)
3200 {
3201 	struct isert_conn *isert_conn = conn->context;
3202 
3203 	pr_debug("isert_wait_conn: Starting\n");
3204 
3205 	mutex_lock(&isert_conn->conn_mutex);
3206 	if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
3207 		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
3208 		rdma_disconnect(isert_conn->conn_cm_id);
3209 	}
3210 	/*
3211 	 * Only wait for conn_wait_comp_err if the isert_conn made it
3212 	 * into full feature phase.
3213 	 */
3214 	if (isert_conn->state == ISER_CONN_INIT) {
3215 		mutex_unlock(&isert_conn->conn_mutex);
3216 		return;
3217 	}
3218 	if (isert_conn->state == ISER_CONN_UP)
3219 		isert_conn->state = ISER_CONN_TERMINATING;
3220 	mutex_unlock(&isert_conn->conn_mutex);
3221 
3222 	wait_for_completion(&isert_conn->conn_wait_comp_err);
3223 
3224 	wait_for_completion(&isert_conn->conn_wait);
3225 	isert_put_conn(isert_conn);
3226 }
3227 
3228 static void isert_free_conn(struct iscsi_conn *conn)
3229 {
3230 	struct isert_conn *isert_conn = conn->context;
3231 
3232 	isert_put_conn(isert_conn);
3233 }
3234 
3235 static struct iscsit_transport iser_target_transport = {
3236 	.name			= "IB/iSER",
3237 	.transport_type		= ISCSI_INFINIBAND,
3238 	.priv_size		= sizeof(struct isert_cmd),
3239 	.owner			= THIS_MODULE,
3240 	.iscsit_setup_np	= isert_setup_np,
3241 	.iscsit_accept_np	= isert_accept_np,
3242 	.iscsit_free_np		= isert_free_np,
3243 	.iscsit_wait_conn	= isert_wait_conn,
3244 	.iscsit_free_conn	= isert_free_conn,
3245 	.iscsit_get_login_rx	= isert_get_login_rx,
3246 	.iscsit_put_login_tx	= isert_put_login_tx,
3247 	.iscsit_immediate_queue	= isert_immediate_queue,
3248 	.iscsit_response_queue	= isert_response_queue,
3249 	.iscsit_get_dataout	= isert_get_dataout,
3250 	.iscsit_queue_data_in	= isert_put_datain,
3251 	.iscsit_queue_status	= isert_put_response,
3252 	.iscsit_aborted_task	= isert_aborted_task,
3253 	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
3254 };
3255 
3256 static int __init isert_init(void)
3257 {
3258 	int ret;
3259 
3260 	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
3261 	if (!isert_rx_wq) {
3262 		pr_err("Unable to allocate isert_rx_wq\n");
3263 		return -ENOMEM;
3264 	}
3265 
3266 	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
3267 	if (!isert_comp_wq) {
3268 		pr_err("Unable to allocate isert_comp_wq\n");
3269 		ret = -ENOMEM;
3270 		goto destroy_rx_wq;
3271 	}
3272 
3273 	iscsit_register_transport(&iser_target_transport);
3274 	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
3275 	return 0;
3276 
3277 destroy_rx_wq:
3278 	destroy_workqueue(isert_rx_wq);
3279 	return ret;
3280 }
3281 
3282 static void __exit isert_exit(void)
3283 {
3284 	flush_scheduled_work();
3285 	destroy_workqueue(isert_comp_wq);
3286 	destroy_workqueue(isert_rx_wq);
3287 	iscsit_unregister_transport(&iser_target_transport);
3288 	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
3289 }
3290 
3291 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
3292 MODULE_VERSION("0.1");
3293 MODULE_AUTHOR("nab@Linux-iSCSI.org");
3294 MODULE_LICENSE("GPL");
3295 
3296 module_init(isert_init);
3297 module_exit(isert_exit);
3298