/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

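/* Each CQ can be shared by up to ISCSI_ISER_MAX_CONN QPs, so size it for
 * the worst case of every connection posting its full quota of DTOs. */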
#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queues (CQs) and a DMA Memory Region (DMA MR) with the device associated
 * with the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	int i, j;
	struct iser_cq_desc *cq_desc;

	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
		 device->ib_device->name, device->ib_device->num_comp_vectors);

	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
				  GFP_KERNEL);
	if (device->cq_desc == NULL)
		goto cq_desc_err;
	cq_desc = device->cq_desc;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device   = device;
		cq_desc[i].cq_index = i;

		device->rx_cq[i] = ib_create_cq(device->ib_device,
					  iser_cq_callback,
					  iser_cq_event_callback,
					  (void *)&cq_desc[i],
					  ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->rx_cq[i])) {
			/* reset the ERR_PTR so the cleanup loop below can
			 * rely on NULL checks (the device is kzalloc'ed) */
			device->rx_cq[i] = NULL;
			goto cq_err;
		}

		device->tx_cq[i] = ib_create_cq(device->ib_device,
					  NULL, iser_cq_event_callback,
					  (void *)&cq_desc[i],
					  ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->tx_cq[i])) {
			device->tx_cq[i] = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&device->cq_tasklet[i],
			     iser_cq_tasklet_fn,
			     (unsigned long)&cq_desc[i]);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (j = 0; j < device->cqs_used; j++)
		tasklet_kill(&device->cq_tasklet[j]);
cq_err:
	/* destroy every CQ that was actually created, including any from
	 * the iteration that failed */
	for (j = 0; j < device->cqs_used; j++) {
		if (device->tx_cq[j])
			ib_destroy_cq(device->tx_cq[j]);
		if (device->rx_cq[j])
			ib_destroy_cq(device->rx_cq[j]);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->cq_desc);
cq_desc_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQs and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;
	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->cqs_used; i++) {
		tasklet_kill(&device->cq_tasklet[i]);
		(void)ib_destroy_cq(device->tx_cq[i]);
		(void)ib_destroy_cq(device->rx_cq[i]);
		device->tx_cq[i] = NULL;
		device->rx_cq[i] = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->cq_desc);

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
 *
 * returns 0 on success, errno code on failure
 */
static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			req_err, resp_err, ret = -ENOMEM;
	struct ib_fmr_pool_param params;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!ib_conn->login_buf)
		goto out_err;

	ib_conn->login_req_buf  = ib_conn->login_buf;
	ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN;

	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_resp_buf,
				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	req_err  = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma);
	resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma);

	if (req_err || resp_err) {
		if (req_err)
			ib_conn->login_req_dma = 0;
		if (resp_err)
			ib_conn->login_resp_dma = 0;
		goto out_err;
	}

	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
				    (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
				    GFP_KERNEL);
	if (!ib_conn->page_vec)
		goto out_err;

	ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = ISCSI_DEF_XMIT_CMDS_MAX * 2;
	params.dirty_watermark	 = ISCSI_DEF_XMIT_CMDS_MAX;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
	ret = PTR_ERR(ib_conn->fmr_pool);
	if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) {
		ib_conn->fmr_pool = NULL;
		goto out_err;
	} else if (ret == -ENOSYS) {
		ib_conn->fmr_pool = NULL;
		iser_warn("FMRs are not supported, using unaligned mode\n");
		ret = 0;
	}

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the fewest active QPs */
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq[min_index];
	init_attr.recv_cq	= device->rx_cq[min_index];
	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * releases the FMR pool, QP and CMA ID objects; always returns 0
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
	int cq_index;
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL) {
		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
		ib_conn->device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(ib_conn->cma_id);
	}
	/* when called from a cma handler context, the caller arranges for
	 * the cma itself to destroy the id */
	if (ib_conn->cma_id != NULL && can_destroy_id)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	if (ib_conn->login_buf) {
		if (ib_conn->login_req_dma)
			ib_dma_unmap_single(ib_conn->device->ib_device,
				ib_conn->login_req_dma,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
		if (ib_conn->login_resp_dma)
			ib_dma_unmap_single(ib_conn->device->ib_device,
				ib_conn->login_resp_dma,
				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
		kfree(ib_conn->login_buf);
	}

	return 0;
}

/**
 * Based on the resolved device's node GUID, check whether an iser device
 * was already allocated for that IB device. If not, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign this IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_err("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

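/**
 * Atomically compare-and-exchange the connection state: if the current
 * state equals @comp, move it to @exch. Returns non-zero iff the
 * exchange actually took place.
 */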
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
				     enum iser_ib_conn_state comp,
				     enum iser_ib_conn_state exch)
{
	int ret;

	spin_lock_bh(&ib_conn->lock);
	if ((ret = (ib_conn->state == comp)))
		ib_conn->state = exch;
	spin_unlock_bh(&ib_conn->lock);
	return ret;
}

/**
 * Frees all conn objects and deallocates the conn descriptor
 */
static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
{
	struct iser_device  *device = ib_conn->device;

	BUG_ON(ib_conn->state != ISER_CONN_DOWN);

	mutex_lock(&ig.connlist_mutex);
	list_del(&ib_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);
	iser_free_rx_descriptors(ib_conn);
	iser_free_ib_conn_res(ib_conn, can_destroy_id);
	ib_conn->device = NULL;
	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
	if (device != NULL)
		iser_device_try_release(device);
	iscsi_destroy_endpoint(ib_conn->ep);
}

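/**
 * Connection reference counting: the initial reference is taken in
 * iser_conn_init(); iser_conn_put() releases the connection once the
 * last reference is dropped, and reports whether it did so.
 */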
void iser_conn_get(struct iser_conn *ib_conn)
{
	atomic_inc(&ib_conn->refcount);
}

int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
{
	if (atomic_dec_and_test(&ib_conn->refcount)) {
		iser_conn_release(ib_conn, can_destroy_id);
		return 1;
	}
	return 0;
}

/**
 * triggers the start of the disconnect procedure and waits for it to complete
 */
void iser_conn_terminate(struct iser_conn *ib_conn)
{
	int err = 0;

	/* change the ib conn state only if the conn is UP, however always call
	 * rdma_disconnect since this is the only way to cause the CMA to change
	 * the QP state to ERROR
	 */

	iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		iser_err("Failed to disconnect, conn: 0x%p err %d\n",
			 ib_conn, err);

	wait_event_interruptible(ib_conn->wait,
				 ib_conn->state == ISER_CONN_DOWN);

	iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
}

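/**
 * Connection establishment failed: mark the connection DOWN, wake any
 * waiters and drop the reference held on behalf of the CMA ID.
 */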
static int iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;
	ib_conn = (struct iser_conn *)cma_id->context;

	ib_conn->state = ISER_CONN_DOWN;
	wake_up_interruptible(&ib_conn->wait);
	return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
}

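/**
 * ADDR_RESOLVED handler: bind the connection to an iser device (looked
 * up, or created, for the IB device behind the CM ID) and kick off
 * route resolution.
 */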
static int iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *ib_conn;
	int    ret;

	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		return iser_connect_error(cma_id);
	}

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->device = device;

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		return iser_connect_error(cma_id);
	}

	return 0;
}

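/**
 * ROUTE_RESOLVED handler: now that both address and route are known,
 * create the per-connection IB resources (login buffers, FMR pool, QP)
 * and issue the actual RDMA connect request.
 */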
static int iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;

	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 4;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return 0;
failure:
	return iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->state = ISER_CONN_UP;
	wake_up_interruptible(&ib_conn->wait);
}

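/**
 * DISCONNECTED/DEVICE_REMOVAL/ADDR_CHANGE handler: flag the failure
 * towards the iSCSI layer and, if no posts are still in flight,
 * complete the termination locally.
 */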
static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;
	int ret;

	ib_conn = (struct iser_conn *)cma_id->context;

	/* getting here when the state is UP means that the conn is being *
	 * terminated asynchronously from the iSCSI layer's perspective.  */
	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING))
		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);

	/* Complete the termination process if no posts are pending */
	if (ib_conn->post_recv_buf_count == 0 &&
	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}

	ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
	return ret;
}

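/**
 * Single dispatch point for all RDMA CM events on iser connections;
 * runs in the CMA callback context.
 */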
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	iser_err("event %d status %d conn %p id %p\n",
		event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ret = iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ret = iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		ret = iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		ret = iser_disconnected_handler(cma_id);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	return ret;
}

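/**
 * Initialize a freshly allocated iser connection; takes the initial
 * reference that is dropped when the connection is released.
 */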
void iser_conn_init(struct iser_conn *ib_conn)
{
	ib_conn->state = ISER_CONN_INIT;
	init_waitqueue_head(&ib_conn->wait);
	ib_conn->post_recv_buf_count = 0;
	atomic_set(&ib_conn->post_send_buf_count, 0);
	atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
	INIT_LIST_HEAD(&ib_conn->conn_list);
	spin_lock_init(&ib_conn->lock);
}

/**
 * starts the process of connecting to the target; unless non_blocking
 * is set, sleeps until the connection is established or has failed
 */
int iser_connect(struct iser_conn   *ib_conn,
		 struct sockaddr_in *src_addr,
		 struct sockaddr_in *dst_addr,
		 int                 non_blocking)
{
	struct sockaddr *src, *dst;
	int err = 0;

	sprintf(ib_conn->name, "%pI4:%d",
		&dst_addr->sin_addr.s_addr, dst_addr->sin_port);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_err("connecting to: %pI4, port 0x%x\n",
		 &dst_addr->sin_addr, dst_addr->sin_port);

	ib_conn->state = ISER_CONN_PENDING;

	iser_conn_get(ib_conn); /* ref ib conn's cma id */
	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					     (void *)ib_conn,
					     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	src = (struct sockaddr *)src_addr;
	dst = (struct sockaddr *)dst_addr;
	err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_event_interruptible(ib_conn->wait,
					 (ib_conn->state != ISER_CONN_PENDING));

		if (ib_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}

	mutex_lock(&ig.connlist_mutex);
	list_add(&ib_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	ib_conn->state = ISER_CONN_DOWN;
	iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */
connect_failure:
	iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iser_conn     *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem  = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
				    page_list,
				    page_vec->length,
				    io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->is_fmr = 1;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered) memory.
 */
void iser_unreg_mem(struct iser_mem_reg *reg)
{
	int ret;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

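/**
 * Post a single receive work request for the login response buffer;
 * used during the iSCSI login phase, before the full rx descriptor
 * ring is in use.
 */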
int iser_post_recvl(struct iser_conn *ib_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = ib_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)ib_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

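/**
 * Post @count receive work requests taken from the connection's rx
 * descriptor ring; rx_desc_head advances (modulo the ring size) only
 * if the post succeeds.
 */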
int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	unsigned int my_rx_head = ib_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &ib_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		ib_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}

/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, error code on failure
 */
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}

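/**
 * Handle a completion error: free a DataOut tx descriptor if one is
 * involved, and once no posts remain in flight, drive the connection
 * state machine to DOWN.
 */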
static void iser_handle_comp_error(struct iser_tx_desc *desc,
				struct iser_conn *ib_conn)
{
	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);

	if (ib_conn->post_recv_buf_count == 0 &&
	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
		/* getting here when the state is UP means that the conn is *
		 * being terminated asynchronously from the iSCSI layer's   *
		 * perspective.                                             */
		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
		    ISER_CONN_TERMINATING))
			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

		/* no more uncompleted posts to the QP, complete the
		 * termination process without waiting for a disconnect event */
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}
}

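/**
 * Drain the send CQ of one of the device's CQ pairs, dispatching
 * successful send completions and routing failures to the error path;
 * returns the number of completions reaped.
 */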
static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
{
	struct ib_cq  *cq = device->tx_cq[cq_index];
	struct ib_wc  wc;
	struct iser_tx_desc *tx_desc;
	struct iser_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc	= (struct iser_tx_desc *) (unsigned long) wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				wc.wr_id, wc.status, wc.vendor_err);
			atomic_dec(&ib_conn->post_send_buf_count);
			iser_handle_comp_error(tx_desc, ib_conn);
		}
		completed_tx++;
	}
	return completed_tx;
}

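/**
 * Per-CQ tasklet: polls the receive CQ (interleaving a tx drain every
 * 64 rx completions so sends are not starved), then re-arms the CQ and
 * performs a final tx drain.
 */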
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
	struct iser_device  *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq	    *cq = device->rx_cq[cq_index];
	struct ib_wc	    wc;
	struct iser_rx_desc *desc;
	unsigned long	    xfer_len;
	struct iser_conn *ib_conn;
	int completed_tx, completed_rx;
	completed_tx = completed_rx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc	 = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn);
		}
		completed_rx++;
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device, cq_index);
	}
	/* it is assumed here that arming the CQ only once it is empty *
	 * would not cause interrupts to be missed                     */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	completed_tx += iser_drain_tx_cq(device, cq_index);
	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}

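/* CQ completion handler: runs in interrupt context, so defer the actual
 * polling to the per-CQ tasklet. */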
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
	struct iser_device  *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;

	tasklet_schedule(&device->cq_tasklet[cq_index]);
}