/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		    struct isert_rdma_wr *wr);

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

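/*
 * Pick the least-loaded CQ pair (fewest active QPs) for this connection,
 * then create an RC queue pair on it via rdma_create_qp().
 */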
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ..
	 */
	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp() failed: %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

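/*
 * Allocate and DMA-map the ring of ISERT_QP_MAX_RECV_DTOS receive
 * descriptors used to post RX buffers to the queue pair, unwinding
 * any mappings already made on failure.
 */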
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

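/*
 * One-time per ib_device setup: query device attributes, select FRWR
 * vs. plain DMA registration handlers based on device capabilities,
 * and create one RX/TX completion-queue pair per vector (bounded by
 * ISERT_MAX_CQ and the number of online CPUs), plus the PD and DMA MR
 * shared by connections on this device.
 */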
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		device->use_frwr = 1;
		device->reg_rdma_mem = isert_reg_rdma_frwr;
		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
	} else {
		device->use_frwr = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors, FRWR: %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_frwr);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	device->dev_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->dev_pd)) {
		ret = PTR_ERR(device->dev_pd);
		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
		goto out_cq_desc;
	}

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->dev_mr)) {
		ret = PTR_ERR(device->dev_mr);
		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
		goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	ib_dealloc_pd(device->dev_pd);

out_cq_desc:
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	ib_dereg_mr(device->dev_mr);
	ib_dealloc_pd(device->dev_pd);
	kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

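/*
 * Look up an existing isert_device by node GUID under device_list_mutex,
 * or allocate and initialize a new one on first use. Returns with the
 * device reference count incremented.
 */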
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_frwr_pool))
		return;

	pr_debug("Freeing conn %p frwr pool\n", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_frwr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_frwr_pool_size)
		pr_warn("Pool still has %d regions registered\n",
			isert_conn->conn_frwr_pool_size - i);
}

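/*
 * Pre-allocate ISCSI_DEF_XMIT_CMDS_MAX fast registration descriptors
 * (fast_reg page list plus fast_reg MR each) onto conn_frwr_pool for
 * later RDMA memory registration.
 */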
static int
isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	int i, ret;

	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
	isert_conn->conn_frwr_pool_size = 0;
	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		fr_desc->data_frpl =
			ib_alloc_fast_reg_page_list(device->ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(fr_desc->data_frpl)) {
			pr_err("Failed to allocate fr_pg_list err=%ld\n",
			       PTR_ERR(fr_desc->data_frpl));
			ret = PTR_ERR(fr_desc->data_frpl);
			kfree(fr_desc);
			goto err;
		}

		fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
					ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(fr_desc->data_mr)) {
			pr_err("Failed to allocate frmr err=%ld\n",
			       PTR_ERR(fr_desc->data_mr));
			ret = PTR_ERR(fr_desc->data_mr);
			ib_free_fast_reg_page_list(fr_desc->data_frpl);
			kfree(fr_desc);
			goto err;
		}
		pr_debug("Create fr_desc %p page_list %p\n",
			 fr_desc, fr_desc->data_frpl->page_list);

		fr_desc->valid = true;
		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
		isert_conn->conn_frwr_pool_size++;
	}

	pr_debug("Created conn %p frwr pool size=%d\n",
		 isert_conn, isert_conn->conn_frwr_pool_size);

	return 0;

err:
	isert_conn_free_frwr_pool(isert_conn);
	return ret;
}
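/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate and initialize the
 * isert_conn, DMA-map the login request/response buffers, resolve the
 * isert_device, optionally build the FRWR pool, create the QP, and
 * queue the connection on np_accept_list for isert_accept_np().
 */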
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iscsi_np *np = cma_id->context;
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_waitqueue_head(&isert_conn->conn_wait);
	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	kref_get(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	mutex_init(&isert_conn->conn_comp_mutex);
	spin_lock_init(&isert_conn->conn_lock);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = device->dev_pd;
	isert_conn->conn_mr = device->dev_mr;

	if (device->use_frwr) {
		ret = isert_conn_create_frwr_pool(isert_conn);
		if (ret) {
			pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
			goto out_frwr;
		}
	}

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
	wake_up(&isert_np->np_accept_wq);
	return 0;

out_conn_dev:
	if (device->use_frwr)
		isert_conn_free_frwr_pool(isert_conn);
out_frwr:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (device && device->use_frwr)
		isert_conn_free_frwr_pool(isert_conn);

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

static void
isert_disconnect_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
				struct isert_conn, conn_logout_work);

	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
		mutex_unlock(&isert_conn->conn_mutex);
		goto wake_up;
	}
	if (!isert_conn->conn_cm_id) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	if (!isert_conn->logout_posted) {
		pr_debug("Calling rdma_disconnect for !logout_posted from"
			 " isert_disconnect_work\n");
		rdma_disconnect(isert_conn->conn_cm_id);
		mutex_unlock(&isert_conn->conn_mutex);
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		goto wake_up;
	}
	mutex_unlock(&isert_conn->conn_mutex);

wake_up:
	wake_up(&isert_conn->conn_wait);
	isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
	schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
		ret = isert_connect_request(cma_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
		isert_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	default:
		pr_err("Unknown RDMA CMA event: %d\n", event->event);
		break;
	}

	if (ret != 0) {
		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
		       event->event, ret);
		dump_stack();
	}

	return ret;
}
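/*
 * Post @count receive work requests, chained from the current ring
 * head; the head only advances when ib_post_recv() succeeds.
 */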
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	= NULL;
	send_wr.wr_id	= (unsigned long)tx_desc;
	send_wr.sg_list	= tx_desc->tx_sg;
	send_wr.num_sge	= tx_desc->num_sge;
	send_wr.opcode	= IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
			ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}

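/*
 * Initialize a SEND work request for @isert_cmd. When @coalesce is set,
 * completion interrupts are batched: only every ISERT_COMP_BATCH_COUNT-th
 * post gets IB_SEND_SIGNALED, and unsignaled descriptors are parked on
 * conn_comp_llist to be drained by isert_send_completion().
 */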
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr, bool coalesce)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	/*
	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
	 */
	mutex_lock(&isert_conn->conn_comp_mutex);
	if (coalesce &&
	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
		mutex_unlock(&isert_conn->conn_comp_mutex);
		return;
	}
	isert_conn->conn_comp_batch = 0;
	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
	mutex_unlock(&isert_conn->conn_comp_mutex);

	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	} else {
		pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	}

	return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr	= isert_conn->login_rsp_dma;
		tx_dsg->length	= length;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			isert_conn->state = ISER_CONN_UP;
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
		   struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	if (!login) {
		pr_err("conn->conn_login is NULL\n");
		dump_stack();
		return;
	}

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, gfp);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && !dump_payload && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
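/*
 * Dispatch a received iSCSI PDU by opcode, allocating an iscsi_cmd
 * where required and stashing the iSER read/write STags and VAs in
 * the isert_cmd for later RDMA.
 */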
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	/* Only TEXT and LOGOUT are legal opcodes on a discovery session */
	if (sess->sess_ops->SessionType &&
	    (opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT)) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}

static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
	if (wr->sge) {
		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);
		wr->sge = NULL;
	}

	if (wr->send_wr) {
		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}

static void
isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);

	if (wr->fr_desc) {
		pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
			 isert_cmd, wr->fr_desc);
		spin_lock_bh(&isert_conn->conn_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
		spin_unlock_bh(&isert_conn->conn_lock);
		wr->fr_desc = NULL;
	}

	if (wr->sge) {
		pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);
		wr->sge = NULL;
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}

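/*
 * Release an isert_cmd: unlink it from the connection's command list,
 * tear down any RDMA registrations, and hand the command back to the
 * target core (or to iscsit_release_cmd() for PDUs without an se_cmd).
 */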
static void
isert_put_cmd(struct isert_cmd *isert_cmd)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE)
			iscsit_stop_dataout_timer(cmd);

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->cur_rdma_length;

	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
		/*
		 * Call atomic_dec(&isert_conn->post_send_buf_count)
		 * from isert_free_conn()
		 */
		isert_conn->logout_posted = true;
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev);
}

static void
__isert_send_completion(struct iser_tx_desc *tx_desc,
			struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		dump_stack();
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

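/*
 * Complete a signaled TX descriptor, first draining any unsignaled
 * descriptors batched on comp_llnode_batch by isert_init_send_wr().
 */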
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct llist_node *llnode = tx_desc->comp_llnode_batch;
	struct iser_tx_desc *t;
	/*
	 * Drain coalesced completion llist starting from comp_llnode_batch
	 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
	 */
	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		__isert_send_completion(t, isert_conn);
	}
	__isert_send_completion(tx_desc, isert_conn);
}

static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	if (tx_desc) {
		struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

		if (!isert_cmd)
			isert_unmap_tx_desc(tx_desc, ib_dev);
		else
			isert_completion_put(tx_desc, isert_cmd, ib_dev);
	}

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
		pr_debug("Calling wake_up from isert_cq_comp_err\n");

		mutex_lock(&isert_conn->conn_mutex);
		if (isert_conn->state != ISER_CONN_DOWN)
			isert_conn->state = ISER_CONN_TERMINATING;
		mutex_unlock(&isert_conn->conn_mutex);

		wake_up(&isert_conn->conn_wait_comp_err);
	}
}

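/*
 * TX completion-queue work: reap send completions one at a time and
 * re-arm the CQ for the next completion event.
 */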
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
			atomic_dec(&isert_conn->post_send_buf_count);
			isert_cq_comp_err(tx_desc, isert_conn);
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}

static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR) {
				pr_debug("RX wc.status: 0x%08x\n", wc.status);
				pr_debug("RX wc.vendor_err: 0x%08x\n",
					 wc.vendor_err);
			}
			isert_conn->post_recv_buf_count--;
			isert_cq_comp_err(NULL, isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}
1784 
1785 static int
1786 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1787 {
1788 	struct ib_send_wr *wr_failed;
1789 	int ret;
1790 
1791 	atomic_inc(&isert_conn->post_send_buf_count);
1792 
1793 	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1794 			   &wr_failed);
1795 	if (ret) {
1796 		pr_err("ib_post_send failed with %d\n", ret);
1797 		atomic_dec(&isert_conn->post_send_buf_count);
1798 		return ret;
1799 	}
1800 	return ret;
1801 }
1802 
1803 static int
1804 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1805 {
1806 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1807 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1808 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1809 	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1810 				&isert_cmd->tx_desc.iscsi_header;
1811 
1812 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1813 	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1814 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1815 	/*
1816 	 * Attach SENSE DATA payload to iSCSI Response PDU
1817 	 */
1818 	if (cmd->se_cmd.sense_buffer &&
1819 	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1820 	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1821 		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1822 		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1823 		u32 padding, pdu_len;
1824 
1825 		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1826 				   cmd->sense_buffer);
1827 		cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1828 
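		/*
		 * The two-byte SenseLength header prepended above is counted
		 * in scsi_sense_length, and the data segment must be padded
		 * to a 4-byte boundary.  "-len & 3" computes that pad: e.g.
		 * a sense length of 20 needs 0 pad bytes, 21 needs 3.
		 */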
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= txt_rsp_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

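/*
 * Translate a slice of the command's TCM scatterlist into ib_sge entries
 * for a single RDMA work request.  The slice starts at "offset" (page
 * granularity plus an intra-page offset) and is capped at max_sge
 * entries; the caller loops, issuing one WR per slice, until data_left
 * is exhausted.  Returns the number of ib_sge entries consumed.
 */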
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}

static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	struct scatterlist *sg_start;
	u32 sg_off = 0, sg_nents;
	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, count, i, ib_sge_cnt;

	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		data_left = se_cmd->data_length;
	} else {
		sg_off = cmd->write_data_done / PAGE_SIZE;
		data_left = se_cmd->data_length - cmd->write_data_done;
		offset = cmd->write_data_done;
		isert_cmd->tx_desc.isert_cmd = isert_cmd;
	}

	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = se_cmd->t_data_nents - sg_off;

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	wr->cur_rdma_length = data_left;
	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, count, sg_start, sg_nents, data_left);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

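	/*
	 * Chain one WR per max_sge-sized slice of the payload.  For
	 * RDMA_WRITE the last WR links into the SCSI response SEND so a
	 * single ib_post_send() emits payload plus status; for RDMA_READ
	 * the last WR is signalled instead, and its completion resumes
	 * command processing.
	 */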
	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return ret;
}

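/*
 * Build the fast-registration page list: walk the DMA-mapped SGL,
 * merging entries that run together into a chunk, and emit one
 * PAGE_SIZE-aligned address per covered page.  E.g. two contiguous 4K
 * entries at 0x10000 and 0x11000 yield fr_pl[] = { 0x10000, 0x11000 }.
 * Returns the number of pages placed in fr_pl.
 */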
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
			 i, (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
				 n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}

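/*
 * Register the command's pages through an FRWR descriptor.  If the
 * descriptor still carries a previous registration (fr_desc->valid is
 * false), a LOCAL_INV WR is chained ahead of the FAST_REG_MR WR and the
 * rkey's low byte is bumped so that stale remote references cannot
 * match the new registration.  On success the single ib_sge points at
 * the freshly registered region.
 */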
static int
isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, page_off;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	u8 key;
	int ret, sg_nents, pagelist_len;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
			 ISCSI_ISER_SG_TABLESIZE);
	page_off = offset % PAGE_SIZE;

	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
		 isert_cmd, fr_desc, sg_nents, sg_off, offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
					     &fr_desc->data_frpl->page_list[0]);

	if (!fr_desc->valid) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start =
		fr_desc->data_frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = data_len;
	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->valid = false;

	ib_sge->lkey = fr_desc->data_mr->lkey;
	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
	ib_sge->length = data_len;

	pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
		 ib_sge->addr, ib_sge->length, ib_sge->lkey);

	return ret;
}

static int
isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		    struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	struct scatterlist *sg_start;
	struct fast_reg_descriptor *fr_desc;
	u32 sg_off = 0, sg_nents;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int ret = 0, count;
	unsigned long flags;

	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		data_left = se_cmd->data_length;
	} else {
		sg_off = cmd->write_data_done / PAGE_SIZE;
		data_left = se_cmd->data_length - cmd->write_data_done;
		offset = cmd->write_data_done;
		isert_cmd->tx_desc.isert_cmd = isert_cmd;
	}

	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = se_cmd->t_data_nents - sg_off;

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, count, sg_start, sg_nents, data_left);

	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
	ib_sge = &wr->s_ib_sge;
	wr->ib_sge = ib_sge;

	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = 0;
		send_wr->next = &isert_cmd->tx_desc.send_wr;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	data_len = min(data_left, rdma_write_max);
	wr->cur_rdma_length = data_len;

	/* if there is a single dma entry, dma mr is sufficient */
	if (count == 1) {
		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
		ib_sge->lkey = isert_conn->conn_mr->lkey;
		wr->fr_desc = NULL;
	} else {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;

		ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
					ib_sge, offset, data_len);
		if (ret) {
			list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
			goto unmap_sg;
		}
	}

	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return ret;
}

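/*
 * Data-IN path: map or register the payload for RDMA_WRITE, build the
 * trailing SCSI response PDU, and post the whole chain with a single
 * ib_post_send().  The return value of 1, rather than 0, appears to
 * tell the iscsit core that status was sent together with the data, so
 * no separate queue_status callback is required.
 */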
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	/*
	 * Build isert_cmd->tx_desc for the iSCSI response PDU and attach
	 * it to the tail of the RDMA_WRITE WR chain.
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd,
			   &isert_cmd->tx_desc.send_wr, true);

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
		 isert_cmd);

	return 1;
}

static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		 isert_cmd);

	return 0;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

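/*
 * Network portal setup: allocate the isert_np accept context, then
 * create, bind and listen on an RDMA CM id (TCP port space, RC QPs).
 * Incoming connect requests arrive via isert_cma_handler() and are
 * parked on np_accept_list until isert_accept_np() collects them.
 */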
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from the
	 * iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}

static int
isert_check_accept_queue(struct isert_np *isert_np)
{
	int empty;

	mutex_lock(&isert_np->np_accept_mutex);
	empty = list_empty(&isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	return empty;
}

static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}

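/*
 * Accept loop: sleep until a pending connection shows up on
 * np_accept_list or the np thread is being reset.  The max_accept
 * counter bounds spurious wakeups that find the list empty; after five
 * such rounds the loop gives up with -ENODEV instead of spinning.
 */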
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = wait_event_interruptible(isert_np->np_accept_wq,
			!isert_check_accept_queue(isert_np) ||
			np->np_thread_state == ISCSI_NP_THREAD_RESET);
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}

static int isert_check_state(struct isert_conn *isert_conn, int state)
{
	int ret;

	mutex_lock(&isert_conn->conn_mutex);
	ret = (isert_conn->state == state);
	mutex_unlock(&isert_conn->conn_mutex);

	return ret;
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_free_conn: Starting\n");
	/*
	 * Decrement post_send_buf_count for the special case when called
	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->logout_posted)
		atomic_dec(&isert_conn->post_send_buf_count);

	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_UP) {
		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
			 isert_conn->state);
		mutex_unlock(&isert_conn->conn_mutex);

		wait_event(isert_conn->conn_wait_comp_err,
			   (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));

		wait_event(isert_conn->conn_wait,
			   (isert_check_state(isert_conn, ISER_CONN_DOWN)));

		isert_put_conn(isert_conn);
		return;
	}
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
		 isert_conn->state);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_event(isert_conn->conn_wait,
		   (isert_check_state(isert_conn, ISER_CONN_DOWN)));

	isert_put_conn(isert_conn);
}

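/*
 * iscsit_transport ops for iSER: the iscsit core's queue, accept and
 * login callbacks are routed to the isert_* implementations above, with
 * per-command transport-private data sized via priv_size.
 */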
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
};

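/*
 * Module init: bring up the RX and completion workqueues before
 * registering the transport, so CQ callbacks that fire as soon as a
 * connection is established always have a workqueue to target.
 */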
static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}

static void __exit isert_exit(void)
{
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);