/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

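/*
 * Each per-vector RX/TX CQ below is shared by up to ISERT_MAX_CONN queue
 * pairs, so the CQs are sized for the worst case of every connection
 * keeping its full receive/request WR quota outstanding at once.
 */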
#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct kmem_cache *isert_cmd_cache;

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

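/*
 * Create the RC queue pair for a new connection.  QPs are spread across
 * the device's completion queues by picking the CQ pair that currently
 * has the fewest active QPs attached.
 */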
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	struct ib_device_attr devattr;
	int ret, index, min_index = 0;

	memset(&devattr, 0, sizeof(struct ib_device_attr));
	ret = isert_query_device(cma_id->device, &devattr);
	if (ret)
		return ret;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ.
	 */
	attr.cap.max_send_sge = devattr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp() failed: %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

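/*
 * Allocate the receive descriptor ring and DMA-map each descriptor for
 * DMA_FROM_DEVICE, unwinding any partial mappings on failure.
 */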
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

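/*
 * Set up the per-device IB resources: a PD, a local DMA MR, and one
 * RX/TX CQ pair per completion vector, bounded by the number of online
 * CPUs and ISERT_MAX_CQ.
 */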
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	int ret = 0, i, j;

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	device->dev_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->dev_pd)) {
		ret = PTR_ERR(device->dev_pd);
		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
		goto out_cq_desc;
	}

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->dev_mr)) {
		ret = PTR_ERR(device->dev_mr);
		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
		goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	ib_dealloc_pd(device->dev_pd);

out_cq_desc:
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	ib_dereg_mr(device->dev_mr);
	ib_dealloc_pd(device->dev_pd);
	kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

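/*
 * Look up an existing isert_device by node_guid under device_list_mutex,
 * or allocate a new one and set up its IB resources.  The reference taken
 * here is dropped again via isert_device_try_release().
 */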
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}

static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iscsi_np *np = cma_id->context;
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_waitqueue_head(&isert_conn->conn_wait);
	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	kref_get(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = device->dev_pd;
	isert_conn->conn_mr = device->dev_mr;

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
	wake_up(&isert_np->np_accept_wq);
	return 0;

out_conn_dev:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

static void
isert_disconnect_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
				struct isert_conn, conn_logout_work);

	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
		mutex_unlock(&isert_conn->conn_mutex);
		goto wake_up;
	}
	if (!isert_conn->conn_cm_id) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	if (!isert_conn->logout_posted) {
		pr_debug("Calling rdma_disconnect for !logout_posted from"
			 " isert_disconnect_work\n");
		rdma_disconnect(isert_conn->conn_cm_id);
		mutex_unlock(&isert_conn->conn_mutex);
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		goto wake_up;
	}
	mutex_unlock(&isert_conn->conn_mutex);

wake_up:
	wake_up(&isert_conn->conn_wait);
	isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
	schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
		ret = isert_connect_request(cma_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
		isert_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	default:
		pr_err("Unknown RDMA CMA event: %d\n", event->event);
		break;
	}

	if (ret != 0) {
		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
		       event->event, ret);
		dump_stack();
	}

	return ret;
}

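/*
 * Post @count receive WRs, chained into a single list.  The ring head is
 * advanced with a power-of-two mask, so this relies on
 * ISERT_QP_MAX_RECV_DTOS being a power of two.
 */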
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
				&rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	= NULL;
	send_wr.wr_id	= (unsigned long)tx_desc;
	send_wr.sg_list	= tx_desc->tx_sg;
	send_wr.num_sge	= tx_desc->num_sge;
	send_wr.opcode	= IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
			ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->send_flags = IB_SEND_SIGNALED;
	send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
		return ret;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return 0;
}

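/*
 * Send a login response PDU.  Until the login completes, RX uses the
 * single dedicated login buffer; once login_complete is set, allocate
 * and post the full receive descriptor ring and mark the connection up.
 */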
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr	= isert_conn->login_rsp_dma;
		tx_dsg->length	= length;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			isert_conn->state = ISER_CONN_UP;
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
		   struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	if (!login) {
		pr_err("conn->conn_login is NULL\n");
		dump_stack();
		return;
	}

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	complete(&isert_conn->conn_login_comp);
}

static void
isert_release_cmd(struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
						   iscsi_cmd);

	pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);

	kfree(cmd->buf_ptr);
	kfree(cmd->tmr_req);

	kmem_cache_free(isert_cmd_cache, isert_cmd);
}

static struct iscsi_cmd *
isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;

	isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
	if (!isert_cmd) {
		pr_err("Unable to allocate isert_cmd\n");
		return NULL;
	}
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;

	return &isert_cmd->iscsi_cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
		      unsigned char *buf)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && !dump_payload && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);

	return 0;
}

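/*
 * Copy an unsolicited Data-Out payload straight from the receive
 * descriptor into the command's se_cmd scatterlist at the current
 * write_data_done offset.
 */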
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iser_rx_desc *rx_desc, struct iscsi_text *hdr)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	/*
	 * Discovery sessions only permit Text and Logout requests.
	 */
	if (sess->sess_ops->SessionType &&
	    opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

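/*
 * Decode the iSER header to pick up any advertised remote read/write
 * STags and VAs before dispatching on the iSCSI opcode.
 */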
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);
}

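/*
 * Handle a receive completion: the one-off login buffer and the ring
 * descriptors are distinguished by address, and the RX ring is
 * replenished once enough posted buffers have been consumed.
 */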
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");

	if (wr->sge) {
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	kfree(wr->send_wr);
	wr->send_wr = NULL;

	kfree(isert_cmd->ib_sge);
	isert_cmd->ib_sge = NULL;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE)
			iscsit_stop_dataout_timer(cmd);

		isert_unmap_cmd(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		isert_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

	iscsit_stop_dataout_timer(cmd);

	if (wr->sge) {
		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	if (isert_cmd->ib_sge) {
		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
		kfree(isert_cmd->ib_sge);
		isert_cmd->ib_sge = NULL;
	}

	cmd->write_data_done = se_cmd->data_length;

	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
		/*
		 * Call atomic_dec(&isert_conn->post_send_buf_count)
		 * from isert_free_conn()
		 */
		isert_conn->logout_posted = true;
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev);
}

static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		dump_stack();
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	if (tx_desc) {
		struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

		if (!isert_cmd)
			isert_unmap_tx_desc(tx_desc, ib_dev);
		else
			isert_completion_put(tx_desc, isert_cmd, ib_dev);
	}

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
		pr_debug("Calling wake_up from isert_cq_comp_err\n");

		mutex_lock(&isert_conn->conn_mutex);
		if (isert_conn->state != ISER_CONN_DOWN)
			isert_conn->state = ISER_CONN_TERMINATING;
		mutex_unlock(&isert_conn->conn_mutex);

		wake_up(&isert_conn->conn_wait_comp_err);
	}
}

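/*
 * Drain TX completions in workqueue context, then re-arm the CQ
 * notification.  Failed sends drop the in-flight count and go through
 * the common completion-error path.
 */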
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
			atomic_dec(&isert_conn->post_send_buf_count);
			isert_cq_comp_err(tx_desc, isert_conn);
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}

static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR) {
				pr_debug("RX wc.status: 0x%08x\n", wc.status);
				pr_debug("RX wc.vendor_err: 0x%08x\n",
					 wc.vendor_err);
			}
			isert_conn->post_recv_buf_count--;
			isert_cq_comp_err(NULL, isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		pr_err("ib_post_send failed with %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
		return ret;
	}
	return ret;
}

static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
					struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
1761 
1762 static int
1763 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1764 {
1765 	struct isert_cmd *isert_cmd = container_of(cmd,
1766 				struct isert_cmd, iscsi_cmd);
1767 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1768 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1769 	struct iscsi_text_rsp *hdr =
1770 		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1771 	u32 txt_rsp_len;
1772 	int rc;
1773 
1774 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1775 	rc = iscsit_build_text_rsp(cmd, conn, hdr);
1776 	if (rc < 0)
1777 		return rc;
1778 
1779 	txt_rsp_len = rc;
1780 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1781 
1782 	if (txt_rsp_len) {
1783 		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1784 		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1785 		void *txt_rsp_buf = cmd->buf_ptr;
1786 
1787 		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1788 				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
1789 
1790 		isert_cmd->pdu_buf_len = txt_rsp_len;
1791 		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
1792 		tx_dsg->length	= txt_rsp_len;
1793 		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
1794 		isert_cmd->tx_desc.num_sge = 2;
1795 	}
1796 	isert_init_send_wr(isert_cmd, send_wr);
1797 
1798 	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1799 
1800 	return isert_post_response(isert_conn, isert_cmd);
1801 }
1802 
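/*
 * Convert up to isert_conn->max_sge entries of the TCM scatterlist,
 * starting at byte 'offset' into the payload, into the ib_sge array
 * of a single RDMA work request.  As a rough sizing example (assuming
 * 4 KiB pages and max_sge == 32), one work request can carry at most
 * 32 * 4 KiB == 128 KiB, which is the rdma_write_max the callers below
 * use to size their work request chains.  Returns the number of
 * scatterlist entries consumed.
 */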
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Map each TCM scatterlist entry's DMA address and length into
	 * the corresponding ib_sge, honoring page_off for the first
	 * entry and clamping each entry to the remaining data_left.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u\n",
			 ib_sge->addr, ib_sge->length);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}

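/*
 * Queue Data-In for an iSER Data READ: map the command's scatterlist,
 * build a chain of IB_WR_RDMA_WRITE work requests targeting the
 * initiator's buffer via read_va/read_stag, and link the SCSI response
 * IB_WR_SEND as the final work request so status goes out once the
 * data has been pushed.
 */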
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
					struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *ib_sge;
	struct scatterlist *sg;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;

	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);

	sg = &se_cmd->t_data_sg[0];
	sg_nents = se_cmd->t_data_nents;

	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map put_datain SGs\n");
		return -EINVAL;
	}
	wr->sge = sg;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
		 count, sg, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate datain ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
		 ib_sge, se_cmd->t_data_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	cmd->stat_sn = conn->stat_sn++;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	data_left = se_cmd->data_length;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->send_flags = 0;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		if (i + 1 == wr->send_wr_num)
			send_wr->next = &isert_cmd->tx_desc.send_wr;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		data_left -= data_len;
	}
	/*
	 * Build isert_cmd->tx_desc for the iSCSI response PDU and attach
	 * it as the final work request in the RDMA_WRITE chain above.
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
	return 1;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	return ret;
}

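/*
 * Fetch Data-Out for an iSER Data WRITE: build IB_WR_RDMA_READ work
 * requests that pull the payload from the initiator via
 * write_va/write_stag, starting at cmd->write_data_done so recovery
 * can resume a partial transfer.  Only the last work request is
 * signaled; its completion resumes command processing.
 */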
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
					struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_sge *ib_sge;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, sg_nents, page_off, va_offset = 0;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, i, ib_sge_cnt;

	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
		 se_cmd->data_length, cmd->write_data_done);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	page_off = cmd->write_data_done % PAGE_SIZE;

	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
		 sg_off, sg_start, page_off);

	data_left = se_cmd->data_length - cmd->write_data_done;
	sg_nents = se_cmd->t_data_nents - sg_off;

	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
		 data_left, sg_nents);

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map get_dataout SGs\n");
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
		 count, sg_start, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate dataout ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
		 ib_sge, sg_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	wr->iser_ib_op = ISER_IB_RDMA_READ;
	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	offset = cmd->write_data_done;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		if (i + 1 == wr->send_wr_num)
			send_wr->send_flags = IB_SEND_SIGNALED;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	return ret;
}

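/*
 * iscsit callbacks for queueing immediate and response PDUs; both
 * simply dispatch on the command state set by the core iSCSI state
 * machine.
 */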
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX
		 * thread context when a failure occurs before se_cmd
		 * execution.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

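/*
 * Allocate the isert_np listener context and stand up the RDMA_CM
 * listen endpoint for this network portal: create an
 * RDMA_PS_TCP/IB_QPT_RC cm_id, bind it to the portal address, and
 * listen with ISERT_RDMA_LISTEN_BACKLOG.  Incoming connect requests
 * are queued onto np_accept_list for isert_accept_np() to reap.
 */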
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Set up np->np_sockaddr from the sockaddr passed in by the
	 * iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}

static int
isert_check_accept_queue(struct isert_np *isert_np)
{
	int empty;

	mutex_lock(&isert_np->np_accept_mutex);
	empty = list_empty(&isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	return empty;
}

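/*
 * Complete the passive-side connection setup.  Note that an RNR retry
 * count of 7 is special-cased by InfiniBand to mean "retry without
 * limit", so a target that is slow to post receive buffers will not
 * drop the connection.
 */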
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

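/*
 * Called by the login thread to wait for the next login request PDU;
 * the receive completion path signals conn_login_comp once the PDU
 * has been copied into the login buffers.
 */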
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);

	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}

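/*
 * Populate the iscsi_conn login/local address strings and ports from
 * the rdma_route endpoints, so the login and configfs code see the
 * same connection info a TCP portal would report.
 */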
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
			 &sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
			 &sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}

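/*
 * iscsi_np accept thread entry point: sleep until a pending connection
 * shows up on np_accept_list (or the thread is reset), dequeue it,
 * post the initial login receive, and complete the RDMA_CM accept.
 * max_accept bounds the number of consecutive empty wakeups tolerated
 * before giving up.
 */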
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = wait_event_interruptible(isert_np->np_accept_wq,
			!isert_check_accept_queue(isert_np) ||
			np->np_thread_state == ISCSI_NP_THREAD_RESET);
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}

static int isert_check_state(struct isert_conn *isert_conn, int state)
{
	int ret;

	mutex_lock(&isert_conn->conn_mutex);
	ret = (isert_conn->state == state);
	mutex_unlock(&isert_conn->conn_mutex);

	return ret;
}

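/*
 * iscsit connection teardown: issue rdma_disconnect() if the CM
 * channel is still up, then wait for the completion-error and
 * disconnect paths to drive the connection state to ISER_CONN_DOWN
 * before dropping the final reference.
 */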
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_free_conn: Starting\n");
	/*
	 * Decrement post_send_buf_count for special case when called
	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->logout_posted)
		atomic_dec(&isert_conn->post_send_buf_count);

	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_UP) {
		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
			 isert_conn->state);
		mutex_unlock(&isert_conn->conn_mutex);

		wait_event(isert_conn->conn_wait_comp_err,
			  (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));

		wait_event(isert_conn->conn_wait,
			  (isert_check_state(isert_conn, ISER_CONN_DOWN)));

		isert_put_conn(isert_conn);
		return;
	}
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
		 isert_conn->state);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_event(isert_conn->conn_wait,
		  (isert_check_state(isert_conn, ISER_CONN_DOWN)));

	isert_put_conn(isert_conn);
}

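/*
 * iscsit_transport method table wiring the iscsi_target_mod core to
 * the verbs implementations above.
 */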
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_alloc_cmd	= isert_alloc_cmd,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
};

static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
			sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
			0, NULL);
	if (!isert_cmd_cache) {
		pr_err("Unable to create isert_cmd_cache\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);
destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}

static void __exit isert_exit(void)
{
	/*
	 * Unregister the transport before tearing down the cmd cache and
	 * workqueues so no new connection can reach them mid-teardown.
	 */
	iscsit_unregister_transport(&iser_target_transport);
	kmem_cache_destroy(isert_cmd_cache);
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);