1 /*
2  * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
3  * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  */
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/ctype.h>
40 #include <linux/kthread.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/atomic.h>
44 #include <rdma/ib_cache.h>
45 #include <scsi/scsi_proto.h>
46 #include <scsi/scsi_tcq.h>
47 #include <target/target_core_base.h>
48 #include <target/target_core_fabric.h>
49 #include "ib_srpt.h"
50 
51 /* Name of this kernel module. */
52 #define DRV_NAME		"ib_srpt"
53 #define DRV_VERSION		"2.0.0"
54 #define DRV_RELDATE		"2011-02-14"
55 
56 #define SRPT_ID_STRING	"Linux SRP target"
57 
58 #undef pr_fmt
59 #define pr_fmt(fmt) DRV_NAME " " fmt
60 
61 MODULE_AUTHOR("Vu Pham and Bart Van Assche");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
63 		   "v" DRV_VERSION " (" DRV_RELDATE ")");
64 MODULE_LICENSE("Dual BSD/GPL");
65 
66 /*
67  * Global Variables
68  */
69 
70 static u64 srpt_service_guid;
71 static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
72 static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
73 
74 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
75 module_param(srp_max_req_size, int, 0444);
76 MODULE_PARM_DESC(srp_max_req_size,
77 		 "Maximum size of SRP request messages in bytes.");
78 
79 static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
80 module_param(srpt_srq_size, int, 0444);
81 MODULE_PARM_DESC(srpt_srq_size,
82 		 "Shared receive queue (SRQ) size.");
83 
84 static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
85 {
86 	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
87 }
88 module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
89 		  0444);
90 MODULE_PARM_DESC(srpt_service_guid,
		 "Use this value for ioc_guid, id_ext and cm_listen_id"
		 " instead of the node_guid of the first HCA.");
93 
94 static struct ib_client srpt_client;
95 static void srpt_release_cmd(struct se_cmd *se_cmd);
96 static void srpt_free_ch(struct kref *kref);
97 static int srpt_queue_status(struct se_cmd *cmd);
98 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
99 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
100 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
101 
102 /*
103  * The only allowed channel state changes are those that change the channel
104  * state into a state with a higher numerical value. Hence the new > prev test.
105  */
106 static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
107 {
108 	unsigned long flags;
109 	enum rdma_ch_state prev;
110 	bool changed = false;
111 
112 	spin_lock_irqsave(&ch->spinlock, flags);
113 	prev = ch->state;
114 	if (new > prev) {
115 		ch->state = new;
116 		changed = true;
117 	}
118 	spin_unlock_irqrestore(&ch->spinlock, flags);
119 
120 	return changed;
121 }
122 
123 /**
124  * srpt_event_handler - asynchronous IB event callback function
125  * @handler: IB event handler registered by ib_register_event_handler().
126  * @event: Description of the event that occurred.
127  *
128  * Callback function called by the InfiniBand core when an asynchronous IB
129  * event occurs. This callback may occur in interrupt context. See also
130  * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
131  * Architecture Specification.
132  */
133 static void srpt_event_handler(struct ib_event_handler *handler,
134 			       struct ib_event *event)
135 {
136 	struct srpt_device *sdev;
137 	struct srpt_port *sport;
138 	u8 port_num;
139 
140 	sdev = ib_get_client_data(event->device, &srpt_client);
141 	if (!sdev || sdev->device != event->device)
142 		return;
143 
144 	pr_debug("ASYNC event= %d on device= %s\n", event->event,
145 		 sdev->device->name);
146 
147 	switch (event->event) {
148 	case IB_EVENT_PORT_ERR:
149 		port_num = event->element.port_num - 1;
150 		if (port_num < sdev->device->phys_port_cnt) {
151 			sport = &sdev->port[port_num];
152 			sport->lid = 0;
153 			sport->sm_lid = 0;
154 		} else {
155 			WARN(true, "event %d: port_num %d out of range 1..%d\n",
156 			     event->event, port_num + 1,
157 			     sdev->device->phys_port_cnt);
158 		}
159 		break;
160 	case IB_EVENT_PORT_ACTIVE:
161 	case IB_EVENT_LID_CHANGE:
162 	case IB_EVENT_PKEY_CHANGE:
163 	case IB_EVENT_SM_CHANGE:
164 	case IB_EVENT_CLIENT_REREGISTER:
165 	case IB_EVENT_GID_CHANGE:
166 		/* Refresh port data asynchronously. */
167 		port_num = event->element.port_num - 1;
168 		if (port_num < sdev->device->phys_port_cnt) {
169 			sport = &sdev->port[port_num];
170 			if (!sport->lid && !sport->sm_lid)
171 				schedule_work(&sport->work);
172 		} else {
173 			WARN(true, "event %d: port_num %d out of range 1..%d\n",
174 			     event->event, port_num + 1,
175 			     sdev->device->phys_port_cnt);
176 		}
177 		break;
178 	default:
179 		pr_err("received unrecognized IB event %d\n", event->event);
180 		break;
181 	}
182 }
183 
184 /**
185  * srpt_srq_event - SRQ event callback function
186  * @event: Description of the event that occurred.
187  * @ctx: Context pointer specified at SRQ creation time.
188  */
189 static void srpt_srq_event(struct ib_event *event, void *ctx)
190 {
191 	pr_debug("SRQ event %d\n", event->event);
192 }
193 
194 static const char *get_ch_state_name(enum rdma_ch_state s)
195 {
196 	switch (s) {
197 	case CH_CONNECTING:
198 		return "connecting";
199 	case CH_LIVE:
200 		return "live";
201 	case CH_DISCONNECTING:
202 		return "disconnecting";
203 	case CH_DRAINING:
204 		return "draining";
205 	case CH_DISCONNECTED:
206 		return "disconnected";
207 	}
208 	return "???";
209 }
210 
211 /**
212  * srpt_qp_event - QP event callback function
213  * @event: Description of the event that occurred.
214  * @ch: SRPT RDMA channel.
215  */
216 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
217 {
218 	pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
219 		 event->event, ch, ch->sess_name, ch->state);
220 
221 	switch (event->event) {
222 	case IB_EVENT_COMM_EST:
223 		ib_cm_notify(ch->ib_cm.cm_id, event->event);
224 		break;
225 	case IB_EVENT_QP_LAST_WQE_REACHED:
226 		pr_debug("%s-%d, state %s: received Last WQE event.\n",
227 			 ch->sess_name, ch->qp->qp_num,
228 			 get_ch_state_name(ch->state));
229 		break;
230 	default:
231 		pr_err("received unrecognized IB QP event %d\n", event->event);
232 		break;
233 	}
234 }
235 
236 /**
 * srpt_set_ioc - initialize an IOUnitInfo structure
238  * @c_list: controller list.
239  * @slot: one-based slot number.
240  * @value: four-bit value.
241  *
 * Copies the lowest four bits of value into element slot of the array of
 * four-bit elements called c_list (controller list). The index slot is
 * one-based.
244  */
245 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
246 {
247 	u16 id;
248 	u8 tmp;
249 
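	/*
	 * Each byte of c_list holds two four-bit entries: odd slots occupy
	 * the high nibble and even slots the low nibble.
	 */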
250 	id = (slot - 1) / 2;
251 	if (slot & 0x1) {
252 		tmp = c_list[id] & 0xf;
253 		c_list[id] = (value << 4) | tmp;
254 	} else {
255 		tmp = c_list[id] & 0xf0;
256 		c_list[id] = (value & 0xf) | tmp;
257 	}
258 }
259 
260 /**
261  * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
262  * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
263  *
264  * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
265  * Specification.
266  */
267 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
268 {
269 	struct ib_class_port_info *cif;
270 
271 	cif = (struct ib_class_port_info *)mad->data;
272 	memset(cif, 0, sizeof(*cif));
273 	cif->base_version = 1;
274 	cif->class_version = 1;
275 
276 	ib_set_cpi_resp_time(cif, 20);
277 	mad->mad_hdr.status = 0;
278 }
279 
280 /**
281  * srpt_get_iou - write IOUnitInfo to a management datagram
282  * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
283  *
284  * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
285  * Specification. See also section B.7, table B.6 in the SRP r16a document.
286  */
287 static void srpt_get_iou(struct ib_dm_mad *mad)
288 {
289 	struct ib_dm_iou_info *ioui;
290 	u8 slot;
291 	int i;
292 
293 	ioui = (struct ib_dm_iou_info *)mad->data;
294 	ioui->change_id = cpu_to_be16(1);
295 	ioui->max_controllers = 16;
296 
297 	/* set present for slot 1 and empty for the rest */
298 	srpt_set_ioc(ioui->controller_list, 1, 1);
299 	for (i = 1, slot = 2; i < 16; i++, slot++)
300 		srpt_set_ioc(ioui->controller_list, slot, 0);
301 
302 	mad->mad_hdr.status = 0;
303 }
304 
305 /**
 * srpt_get_ioc - write IOControllerProfile to a management datagram
307  * @sport: HCA port through which the MAD has been received.
308  * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
309  * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
310  *
311  * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
312  * Architecture Specification. See also section B.7, table B.7 in the SRP
313  * r16a document.
314  */
315 static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
316 			 struct ib_dm_mad *mad)
317 {
318 	struct srpt_device *sdev = sport->sdev;
319 	struct ib_dm_ioc_profile *iocp;
320 	int send_queue_depth;
321 
322 	iocp = (struct ib_dm_ioc_profile *)mad->data;
323 
324 	if (!slot || slot > 16) {
325 		mad->mad_hdr.status
326 			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
327 		return;
328 	}
329 
330 	if (slot > 2) {
331 		mad->mad_hdr.status
332 			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
333 		return;
334 	}
335 
336 	if (sdev->use_srq)
337 		send_queue_depth = sdev->srq_size;
338 	else
339 		send_queue_depth = min(MAX_SRPT_RQ_SIZE,
340 				       sdev->device->attrs.max_qp_wr);
341 
342 	memset(iocp, 0, sizeof(*iocp));
343 	strcpy(iocp->id_string, SRPT_ID_STRING);
344 	iocp->guid = cpu_to_be64(srpt_service_guid);
345 	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
346 	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
347 	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
348 	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
349 	iocp->subsys_device_id = 0x0;
350 	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
351 	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
352 	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
353 	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
354 	iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
355 	iocp->rdma_read_depth = 4;
356 	iocp->send_size = cpu_to_be32(srp_max_req_size);
357 	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
358 					  1U << 24));
359 	iocp->num_svc_entries = 1;
360 	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
361 		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
362 
363 	mad->mad_hdr.status = 0;
364 }
365 
366 /**
367  * srpt_get_svc_entries - write ServiceEntries to a management datagram
368  * @ioc_guid: I/O controller GUID to use in reply.
369  * @slot: I/O controller number.
370  * @hi: End of the range of service entries to be specified in the reply.
 * @lo: Start of the range of service entries to be specified in the reply.
372  * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
373  *
374  * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
375  * Specification. See also section B.7, table B.8 in the SRP r16a document.
376  */
377 static void srpt_get_svc_entries(u64 ioc_guid,
378 				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
379 {
380 	struct ib_dm_svc_entries *svc_entries;
381 
382 	WARN_ON(!ioc_guid);
383 
384 	if (!slot || slot > 16) {
385 		mad->mad_hdr.status
386 			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
387 		return;
388 	}
389 
390 	if (slot > 2 || lo > hi || hi > 1) {
391 		mad->mad_hdr.status
392 			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
393 		return;
394 	}
395 
396 	svc_entries = (struct ib_dm_svc_entries *)mad->data;
397 	memset(svc_entries, 0, sizeof(*svc_entries));
398 	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
399 	snprintf(svc_entries->service_entries[0].name,
400 		 sizeof(svc_entries->service_entries[0].name),
401 		 "%s%016llx",
402 		 SRP_SERVICE_NAME_PREFIX,
403 		 ioc_guid);
404 
405 	mad->mad_hdr.status = 0;
406 }
407 
408 /**
409  * srpt_mgmt_method_get - process a received management datagram
410  * @sp:      HCA port through which the MAD has been received.
411  * @rq_mad:  received MAD.
412  * @rsp_mad: response MAD.
413  */
414 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
415 				 struct ib_dm_mad *rsp_mad)
416 {
417 	u16 attr_id;
418 	u32 slot;
419 	u8 hi, lo;
420 
421 	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
422 	switch (attr_id) {
423 	case DM_ATTR_CLASS_PORT_INFO:
424 		srpt_get_class_port_info(rsp_mad);
425 		break;
426 	case DM_ATTR_IOU_INFO:
427 		srpt_get_iou(rsp_mad);
428 		break;
429 	case DM_ATTR_IOC_PROFILE:
430 		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
431 		srpt_get_ioc(sp, slot, rsp_mad);
432 		break;
433 	case DM_ATTR_SVC_ENTRIES:
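		/*
		 * Decode the attribute modifier: bits 31:16 hold the
		 * controller slot, bits 15:8 the end of the requested range
		 * of service entries and bits 7:0 the start of that range.
		 */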
434 		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
435 		hi = (u8) ((slot >> 8) & 0xff);
436 		lo = (u8) (slot & 0xff);
437 		slot = (u16) ((slot >> 16) & 0xffff);
438 		srpt_get_svc_entries(srpt_service_guid,
439 				     slot, hi, lo, rsp_mad);
440 		break;
441 	default:
442 		rsp_mad->mad_hdr.status =
443 		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
444 		break;
445 	}
446 }
447 
448 /**
449  * srpt_mad_send_handler - MAD send completion callback
450  * @mad_agent: Return value of ib_register_mad_agent().
451  * @mad_wc: Work completion reporting that the MAD has been sent.
452  */
453 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
454 				  struct ib_mad_send_wc *mad_wc)
455 {
456 	rdma_destroy_ah(mad_wc->send_buf->ah);
457 	ib_free_send_mad(mad_wc->send_buf);
458 }
459 
460 /**
461  * srpt_mad_recv_handler - MAD reception callback function
462  * @mad_agent: Return value of ib_register_mad_agent().
463  * @send_buf: Not used.
464  * @mad_wc: Work completion reporting that a MAD has been received.
465  */
466 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
467 				  struct ib_mad_send_buf *send_buf,
468 				  struct ib_mad_recv_wc *mad_wc)
469 {
470 	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
471 	struct ib_ah *ah;
472 	struct ib_mad_send_buf *rsp;
473 	struct ib_dm_mad *dm_mad;
474 
475 	if (!mad_wc || !mad_wc->recv_buf.mad)
476 		return;
477 
478 	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
479 				  mad_wc->recv_buf.grh, mad_agent->port_num);
480 	if (IS_ERR(ah))
481 		goto err;
482 
483 	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
484 
485 	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
486 				 mad_wc->wc->pkey_index, 0,
487 				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
488 				 GFP_KERNEL,
489 				 IB_MGMT_BASE_VERSION);
490 	if (IS_ERR(rsp))
491 		goto err_rsp;
492 
493 	rsp->ah = ah;
494 
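	/*
	 * Start from a copy of the received MAD so that the transaction ID
	 * and attribute fields are echoed back, and overwrite the method and
	 * status fields for the GetResp.
	 */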
495 	dm_mad = rsp->mad;
496 	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
497 	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
498 	dm_mad->mad_hdr.status = 0;
499 
500 	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
501 	case IB_MGMT_METHOD_GET:
502 		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
503 		break;
504 	case IB_MGMT_METHOD_SET:
505 		dm_mad->mad_hdr.status =
506 		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
507 		break;
508 	default:
509 		dm_mad->mad_hdr.status =
510 		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
511 		break;
512 	}
513 
514 	if (!ib_post_send_mad(rsp, NULL)) {
515 		ib_free_recv_mad(mad_wc);
516 		/* will destroy_ah & free_send_mad in send completion */
517 		return;
518 	}
519 
520 	ib_free_send_mad(rsp);
521 
522 err_rsp:
523 	rdma_destroy_ah(ah);
524 err:
525 	ib_free_recv_mad(mad_wc);
526 }
527 
528 static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
529 {
530 	const __be16 *g = (const __be16 *)guid;
531 
532 	return snprintf(buf, size, "%04x:%04x:%04x:%04x",
533 			be16_to_cpu(g[0]), be16_to_cpu(g[1]),
534 			be16_to_cpu(g[2]), be16_to_cpu(g[3]));
535 }
536 
537 /**
538  * srpt_refresh_port - configure a HCA port
539  * @sport: SRPT HCA port.
540  *
541  * Enable InfiniBand management datagram processing, update the cached sm_lid,
542  * lid and gid values, and register a callback function for processing MADs
543  * on the specified port.
544  *
545  * Note: It is safe to call this function more than once for the same port.
546  */
547 static int srpt_refresh_port(struct srpt_port *sport)
548 {
549 	struct ib_mad_reg_req reg_req;
550 	struct ib_port_modify port_modify;
551 	struct ib_port_attr port_attr;
552 	int ret;
553 
554 	memset(&port_modify, 0, sizeof(port_modify));
555 	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
556 	port_modify.clr_port_cap_mask = 0;
557 
558 	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
559 	if (ret)
560 		goto err_mod_port;
561 
562 	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
563 	if (ret)
564 		goto err_query_port;
565 
566 	sport->sm_lid = port_attr.sm_lid;
567 	sport->lid = port_attr.lid;
568 
569 	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
570 			   NULL);
571 	if (ret)
572 		goto err_query_port;
573 
574 	sport->port_guid_wwn.priv = sport;
575 	srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
576 			 &sport->gid.global.interface_id);
577 	sport->port_gid_wwn.priv = sport;
578 	snprintf(sport->port_gid, sizeof(sport->port_gid),
579 		 "0x%016llx%016llx",
580 		 be64_to_cpu(sport->gid.global.subnet_prefix),
581 		 be64_to_cpu(sport->gid.global.interface_id));
582 
583 	if (!sport->mad_agent) {
584 		memset(&reg_req, 0, sizeof(reg_req));
585 		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
586 		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
587 		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
588 		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
589 
590 		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
591 							 sport->port,
592 							 IB_QPT_GSI,
593 							 &reg_req, 0,
594 							 srpt_mad_send_handler,
595 							 srpt_mad_recv_handler,
596 							 sport, 0);
597 		if (IS_ERR(sport->mad_agent)) {
598 			ret = PTR_ERR(sport->mad_agent);
599 			sport->mad_agent = NULL;
600 			goto err_query_port;
601 		}
602 	}
603 
604 	return 0;
605 
606 err_query_port:
607 
608 	port_modify.set_port_cap_mask = 0;
609 	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
610 	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
611 
612 err_mod_port:
613 
614 	return ret;
615 }
616 
617 /**
618  * srpt_unregister_mad_agent - unregister MAD callback functions
619  * @sdev: SRPT HCA pointer.
620  *
621  * Note: It is safe to call this function more than once for the same device.
622  */
623 static void srpt_unregister_mad_agent(struct srpt_device *sdev)
624 {
625 	struct ib_port_modify port_modify = {
626 		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
627 	};
628 	struct srpt_port *sport;
629 	int i;
630 
631 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
632 		sport = &sdev->port[i - 1];
633 		WARN_ON(sport->port != i);
634 		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
635 			pr_err("disabling MAD processing failed.\n");
636 		if (sport->mad_agent) {
637 			ib_unregister_mad_agent(sport->mad_agent);
638 			sport->mad_agent = NULL;
639 		}
640 	}
641 }
642 
643 /**
644  * srpt_alloc_ioctx - allocate a SRPT I/O context structure
645  * @sdev: SRPT HCA pointer.
646  * @ioctx_size: I/O context size.
647  * @dma_size: Size of I/O context DMA buffer.
648  * @dir: DMA data direction.
649  */
650 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
651 					   int ioctx_size, int dma_size,
652 					   enum dma_data_direction dir)
653 {
654 	struct srpt_ioctx *ioctx;
655 
656 	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
657 	if (!ioctx)
658 		goto err;
659 
660 	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
661 	if (!ioctx->buf)
662 		goto err_free_ioctx;
663 
664 	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
665 	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
666 		goto err_free_buf;
667 
668 	return ioctx;
669 
670 err_free_buf:
671 	kfree(ioctx->buf);
672 err_free_ioctx:
673 	kfree(ioctx);
674 err:
675 	return NULL;
676 }
677 
678 /**
679  * srpt_free_ioctx - free a SRPT I/O context structure
680  * @sdev: SRPT HCA pointer.
681  * @ioctx: I/O context pointer.
682  * @dma_size: Size of I/O context DMA buffer.
683  * @dir: DMA data direction.
684  */
685 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
686 			    int dma_size, enum dma_data_direction dir)
687 {
688 	if (!ioctx)
689 		return;
690 
691 	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
692 	kfree(ioctx->buf);
693 	kfree(ioctx);
694 }
695 
696 /**
697  * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
698  * @sdev:       Device to allocate the I/O context ring for.
699  * @ring_size:  Number of elements in the I/O context ring.
700  * @ioctx_size: I/O context size.
701  * @dma_size:   DMA buffer size.
702  * @dir:        DMA data direction.
703  */
704 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
705 				int ring_size, int ioctx_size,
706 				int dma_size, enum dma_data_direction dir)
707 {
708 	struct srpt_ioctx **ring;
709 	int i;
710 
711 	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
712 		&& ioctx_size != sizeof(struct srpt_send_ioctx));
713 
714 	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
715 	if (!ring)
716 		goto out;
717 	for (i = 0; i < ring_size; ++i) {
718 		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
719 		if (!ring[i])
720 			goto err;
721 		ring[i]->index = i;
722 	}
723 	goto out;
724 
725 err:
726 	while (--i >= 0)
727 		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
728 	kfree(ring);
729 	ring = NULL;
730 out:
731 	return ring;
732 }
733 
734 /**
735  * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
736  * @ioctx_ring: I/O context ring to be freed.
737  * @sdev: SRPT HCA pointer.
738  * @ring_size: Number of ring elements.
739  * @dma_size: Size of I/O context DMA buffer.
740  * @dir: DMA data direction.
741  */
742 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
743 				 struct srpt_device *sdev, int ring_size,
744 				 int dma_size, enum dma_data_direction dir)
745 {
746 	int i;
747 
748 	if (!ioctx_ring)
749 		return;
750 
751 	for (i = 0; i < ring_size; ++i)
752 		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
753 	kfree(ioctx_ring);
754 }
755 
756 /**
757  * srpt_set_cmd_state - set the state of a SCSI command
758  * @ioctx: Send I/O context.
759  * @new: New I/O context state.
760  *
761  * Does not modify the state of aborted commands. Returns the previous command
762  * state.
763  */
764 static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
765 						  enum srpt_command_state new)
766 {
767 	enum srpt_command_state previous;
768 
769 	previous = ioctx->state;
770 	if (previous != SRPT_STATE_DONE)
771 		ioctx->state = new;
772 
773 	return previous;
774 }
775 
776 /**
777  * srpt_test_and_set_cmd_state - test and set the state of a command
778  * @ioctx: Send I/O context.
779  * @old: Current I/O context state.
780  * @new: New I/O context state.
781  *
782  * Returns true if and only if the previous command state was equal to 'old'.
783  */
784 static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
785 					enum srpt_command_state old,
786 					enum srpt_command_state new)
787 {
788 	enum srpt_command_state previous;
789 
790 	WARN_ON(!ioctx);
791 	WARN_ON(old == SRPT_STATE_DONE);
792 	WARN_ON(new == SRPT_STATE_NEW);
793 
794 	previous = ioctx->state;
795 	if (previous == old)
796 		ioctx->state = new;
797 
798 	return previous == old;
799 }
800 
801 /**
802  * srpt_post_recv - post an IB receive request
803  * @sdev: SRPT HCA pointer.
804  * @ch: SRPT RDMA channel.
805  * @ioctx: Receive I/O context pointer.
806  */
807 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
808 			  struct srpt_recv_ioctx *ioctx)
809 {
810 	struct ib_sge list;
811 	struct ib_recv_wr wr, *bad_wr;
812 
813 	BUG_ON(!sdev);
814 	list.addr = ioctx->ioctx.dma;
815 	list.length = srp_max_req_size;
816 	list.lkey = sdev->lkey;
817 
818 	ioctx->ioctx.cqe.done = srpt_recv_done;
819 	wr.wr_cqe = &ioctx->ioctx.cqe;
820 	wr.next = NULL;
821 	wr.sg_list = &list;
822 	wr.num_sge = 1;
823 
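	/*
	 * With an SRQ all channels share a single receive queue; otherwise
	 * receive requests are posted on the per-channel queue pair.
	 */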
824 	if (sdev->use_srq)
825 		return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
826 	else
827 		return ib_post_recv(ch->qp, &wr, &bad_wr);
828 }
829 
830 /**
831  * srpt_zerolength_write - perform a zero-length RDMA write
832  * @ch: SRPT RDMA channel.
833  *
834  * A quote from the InfiniBand specification: C9-88: For an HCA responder
835  * using Reliable Connection service, for each zero-length RDMA READ or WRITE
836  * request, the R_Key shall not be validated, even if the request includes
837  * Immediate data.
838  */
839 static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
840 {
841 	struct ib_send_wr wr, *bad_wr;
842 
843 	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
844 		 ch->qp->qp_num);
845 
846 	memset(&wr, 0, sizeof(wr));
847 	wr.opcode = IB_WR_RDMA_WRITE;
848 	wr.wr_cqe = &ch->zw_cqe;
849 	wr.send_flags = IB_SEND_SIGNALED;
850 	return ib_post_send(ch->qp, &wr, &bad_wr);
851 }
852 
853 static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
854 {
855 	struct srpt_rdma_ch *ch = cq->cq_context;
856 
857 	pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
858 		 wc->status);
859 
860 	if (wc->status == IB_WC_SUCCESS) {
861 		srpt_process_wait_list(ch);
862 	} else {
863 		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
864 			schedule_work(&ch->release_work);
865 		else
866 			pr_debug("%s-%d: already disconnected.\n",
867 				 ch->sess_name, ch->qp->qp_num);
868 	}
869 }
870 
871 static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
872 		struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
873 		unsigned *sg_cnt)
874 {
875 	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
876 	struct srpt_rdma_ch *ch = ioctx->ch;
877 	struct scatterlist *prev = NULL;
878 	unsigned prev_nents;
879 	int ret, i;
880 
881 	if (nbufs == 1) {
882 		ioctx->rw_ctxs = &ioctx->s_rw_ctx;
883 	} else {
884 		ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
885 			GFP_KERNEL);
886 		if (!ioctx->rw_ctxs)
887 			return -ENOMEM;
888 	}
889 
890 	for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
891 		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
892 		u64 remote_addr = be64_to_cpu(db->va);
893 		u32 size = be32_to_cpu(db->len);
894 		u32 rkey = be32_to_cpu(db->key);
895 
896 		ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
897 				i < nbufs - 1);
898 		if (ret)
899 			goto unwind;
900 
901 		ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
902 				ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
903 		if (ret < 0) {
904 			target_free_sgl(ctx->sg, ctx->nents);
905 			goto unwind;
906 		}
907 
908 		ioctx->n_rdma += ret;
909 		ioctx->n_rw_ctx++;
910 
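		/*
		 * Chain the scatterlist of this descriptor onto that of the
		 * previous one so that a single scatterlist spanning all
		 * descriptors is reported to the caller.
		 */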
911 		if (prev) {
912 			sg_unmark_end(&prev[prev_nents - 1]);
913 			sg_chain(prev, prev_nents + 1, ctx->sg);
914 		} else {
915 			*sg = ctx->sg;
916 		}
917 
918 		prev = ctx->sg;
919 		prev_nents = ctx->nents;
920 
921 		*sg_cnt += ctx->nents;
922 	}
923 
924 	return 0;
925 
926 unwind:
927 	while (--i >= 0) {
928 		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
929 
930 		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
931 				ctx->sg, ctx->nents, dir);
932 		target_free_sgl(ctx->sg, ctx->nents);
933 	}
934 	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
935 		kfree(ioctx->rw_ctxs);
936 	return ret;
937 }
938 
939 static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
940 				    struct srpt_send_ioctx *ioctx)
941 {
942 	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
943 	int i;
944 
945 	for (i = 0; i < ioctx->n_rw_ctx; i++) {
946 		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
947 
948 		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
949 				ctx->sg, ctx->nents, dir);
950 		target_free_sgl(ctx->sg, ctx->nents);
951 	}
952 
953 	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
954 		kfree(ioctx->rw_ctxs);
955 }
956 
957 static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
958 {
959 	/*
960 	 * The pointer computations below will only be compiled correctly
961 	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
962 	 * whether srp_cmd::add_data has been declared as a byte pointer.
963 	 */
964 	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
965 		     !__same_type(srp_cmd->add_data[0], (u8)0));
966 
967 	/*
968 	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
969 	 * CDB LENGTH' field are reserved and the size in bytes of this field
970 	 * is four times the value specified in bits 3..7. Hence the "& ~3".
971 	 */
972 	return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
973 }
974 
975 /**
976  * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
977  * @ioctx: Pointer to the I/O context associated with the request.
978  * @srp_cmd: Pointer to the SRP_CMD request data.
979  * @dir: Pointer to the variable to which the transfer direction will be
980  *   written.
981  * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
982  * @sg_cnt: [out] length of @sg.
983  * @data_len: Pointer to the variable to which the total data length of all
984  *   descriptors in the SRP_CMD request will be written.
985  *
 * This function initializes ioctx->cmd.data_direction and the RDMA read/write
 * contexts in ioctx->rw_ctxs.
987  *
988  * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
989  * -ENOMEM when memory allocation fails and zero upon success.
990  */
991 static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
992 		struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
993 		struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
994 {
995 	BUG_ON(!dir);
996 	BUG_ON(!data_len);
997 
998 	/*
999 	 * The lower four bits of the buffer format field contain the DATA-IN
1000 	 * buffer descriptor format, and the highest four bits contain the
1001 	 * DATA-OUT buffer descriptor format.
1002 	 */
1003 	if (srp_cmd->buf_fmt & 0xf)
1004 		/* DATA-IN: transfer data from target to initiator (read). */
1005 		*dir = DMA_FROM_DEVICE;
1006 	else if (srp_cmd->buf_fmt >> 4)
1007 		/* DATA-OUT: transfer data from initiator to target (write). */
1008 		*dir = DMA_TO_DEVICE;
1009 	else
1010 		*dir = DMA_NONE;
1011 
1012 	/* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
1013 	ioctx->cmd.data_direction = *dir;
1014 
1015 	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
1016 	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
1018 
1019 		*data_len = be32_to_cpu(db->len);
1020 		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
1021 	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
1022 		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
1023 		struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
1024 		int nbufs = be32_to_cpu(idb->table_desc.len) /
1025 				sizeof(struct srp_direct_buf);
1026 
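		/*
		 * Reject descriptor tables with more entries than the
		 * data-out and data-in descriptor counts in the SRP_CMD
		 * request allow.
		 */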
1027 		if (nbufs >
1028 		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
1029 			pr_err("received unsupported SRP_CMD request"
1030 			       " type (%u out + %u in != %u / %zu)\n",
1031 			       srp_cmd->data_out_desc_cnt,
1032 			       srp_cmd->data_in_desc_cnt,
1033 			       be32_to_cpu(idb->table_desc.len),
1034 			       sizeof(struct srp_direct_buf));
1035 			return -EINVAL;
1036 		}
1037 
1038 		*data_len = be32_to_cpu(idb->len);
1039 		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
1040 				sg, sg_cnt);
1041 	} else {
1042 		*data_len = 0;
1043 		return 0;
1044 	}
1045 }
1046 
1047 /**
1048  * srpt_init_ch_qp - initialize queue pair attributes
1049  * @ch: SRPT RDMA channel.
1050  * @qp: Queue pair pointer.
1051  *
 * Initializes the attributes of queue pair 'qp' by allowing local write
 * access. Also transitions 'qp' to state IB_QPS_INIT.
1054  */
1055 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1056 {
1057 	struct ib_qp_attr *attr;
1058 	int ret;
1059 
1060 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1061 	if (!attr)
1062 		return -ENOMEM;
1063 
1064 	attr->qp_state = IB_QPS_INIT;
1065 	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1066 	attr->port_num = ch->sport->port;
1067 
1068 	ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
1069 				  ch->pkey, &attr->pkey_index);
1070 	if (ret < 0)
1071 		pr_err("Translating pkey %#x failed (%d) - using index 0\n",
1072 		       ch->pkey, ret);
1073 
1074 	ret = ib_modify_qp(qp, attr,
1075 			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
1076 			   IB_QP_PKEY_INDEX);
1077 
1078 	kfree(attr);
1079 	return ret;
1080 }
1081 
1082 /**
1083  * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
1084  * @ch: channel of the queue pair.
1085  * @qp: queue pair to change the state of.
1086  *
1087  * Returns zero upon success and a negative value upon failure.
1088  *
1089  * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
1090  * If this structure ever becomes larger, it might be necessary to allocate
1091  * it dynamically instead of on the stack.
1092  */
1093 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1094 {
1095 	struct ib_qp_attr qp_attr;
1096 	int attr_mask;
1097 	int ret;
1098 
1099 	qp_attr.qp_state = IB_QPS_RTR;
1100 	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1101 	if (ret)
1102 		goto out;
1103 
1104 	qp_attr.max_dest_rd_atomic = 4;
1105 
1106 	ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1107 
1108 out:
1109 	return ret;
1110 }
1111 
1112 /**
1113  * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
1114  * @ch: channel of the queue pair.
1115  * @qp: queue pair to change the state of.
1116  *
1117  * Returns zero upon success and a negative value upon failure.
1118  *
1119  * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
1120  * If this structure ever becomes larger, it might be necessary to allocate
1121  * it dynamically instead of on the stack.
1122  */
1123 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1124 {
1125 	struct ib_qp_attr qp_attr;
1126 	int attr_mask;
1127 	int ret;
1128 
1129 	qp_attr.qp_state = IB_QPS_RTS;
1130 	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1131 	if (ret)
1132 		goto out;
1133 
1134 	qp_attr.max_rd_atomic = 4;
1135 
1136 	ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1137 
1138 out:
1139 	return ret;
1140 }
1141 
1142 /**
1143  * srpt_ch_qp_err - set the channel queue pair state to 'error'
1144  * @ch: SRPT RDMA channel.
1145  */
1146 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1147 {
1148 	struct ib_qp_attr qp_attr;
1149 
1150 	qp_attr.qp_state = IB_QPS_ERR;
1151 	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
1152 }
1153 
1154 /**
1155  * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
1156  * @ch: SRPT RDMA channel.
1157  */
1158 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1159 {
1160 	struct srpt_send_ioctx *ioctx;
1161 	unsigned long flags;
1162 
1163 	BUG_ON(!ch);
1164 
1165 	ioctx = NULL;
1166 	spin_lock_irqsave(&ch->spinlock, flags);
1167 	if (!list_empty(&ch->free_list)) {
1168 		ioctx = list_first_entry(&ch->free_list,
1169 					 struct srpt_send_ioctx, free_list);
1170 		list_del(&ioctx->free_list);
1171 	}
1172 	spin_unlock_irqrestore(&ch->spinlock, flags);
1173 
1174 	if (!ioctx)
1175 		return ioctx;
1176 
1177 	BUG_ON(ioctx->ch != ch);
1178 	ioctx->state = SRPT_STATE_NEW;
1179 	ioctx->n_rdma = 0;
1180 	ioctx->n_rw_ctx = 0;
1181 	ioctx->queue_status_only = false;
1182 	/*
1183 	 * transport_init_se_cmd() does not initialize all fields, so do it
1184 	 * here.
1185 	 */
1186 	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1187 	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1188 
1189 	return ioctx;
1190 }
1191 
1192 /**
1193  * srpt_abort_cmd - abort a SCSI command
1194  * @ioctx:   I/O context associated with the SCSI command.
1195  */
1196 static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1197 {
1198 	enum srpt_command_state state;
1199 
1200 	BUG_ON(!ioctx);
1201 
1202 	/*
1203 	 * If the command is in a state where the target core is waiting for
1204 	 * the ib_srpt driver, change the state to the next state.
1205 	 */
1206 
1207 	state = ioctx->state;
1208 	switch (state) {
1209 	case SRPT_STATE_NEED_DATA:
1210 		ioctx->state = SRPT_STATE_DATA_IN;
1211 		break;
1212 	case SRPT_STATE_CMD_RSP_SENT:
1213 	case SRPT_STATE_MGMT_RSP_SENT:
1214 		ioctx->state = SRPT_STATE_DONE;
1215 		break;
1216 	default:
1217 		WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
1218 			  __func__, state);
1219 		break;
1220 	}
1221 
1222 	pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
1223 		 ioctx->state, ioctx->cmd.tag);
1224 
1225 	switch (state) {
1226 	case SRPT_STATE_NEW:
1227 	case SRPT_STATE_DATA_IN:
1228 	case SRPT_STATE_MGMT:
1229 	case SRPT_STATE_DONE:
1230 		/*
1231 		 * Do nothing - defer abort processing until
1232 		 * srpt_queue_response() is invoked.
1233 		 */
1234 		break;
1235 	case SRPT_STATE_NEED_DATA:
1236 		pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
1237 		transport_generic_request_failure(&ioctx->cmd,
1238 					TCM_CHECK_CONDITION_ABORT_CMD);
1239 		break;
1240 	case SRPT_STATE_CMD_RSP_SENT:
1241 		/*
1242 		 * SRP_RSP sending failed or the SRP_RSP send completion has
1243 		 * not been received in time.
1244 		 */
1245 		transport_generic_free_cmd(&ioctx->cmd, 0);
1246 		break;
1247 	case SRPT_STATE_MGMT_RSP_SENT:
1248 		transport_generic_free_cmd(&ioctx->cmd, 0);
1249 		break;
1250 	default:
1251 		WARN(1, "Unexpected command state (%d)", state);
1252 		break;
1253 	}
1254 
1255 	return state;
1256 }
1257 
1258 /**
1259  * srpt_rdma_read_done - RDMA read completion callback
1260  * @cq: Completion queue.
1261  * @wc: Work completion.
1262  *
1263  * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
1264  * the data that has been transferred via IB RDMA had to be postponed until the
1265  * check_stop_free() callback.  None of this is necessary anymore and needs to
1266  * be cleaned up.
1267  */
1268 static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1269 {
1270 	struct srpt_rdma_ch *ch = cq->cq_context;
1271 	struct srpt_send_ioctx *ioctx =
1272 		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
1273 
1274 	WARN_ON(ioctx->n_rdma <= 0);
1275 	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1276 	ioctx->n_rdma = 0;
1277 
1278 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1279 		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
1280 			ioctx, wc->status);
1281 		srpt_abort_cmd(ioctx);
1282 		return;
1283 	}
1284 
1285 	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1286 					SRPT_STATE_DATA_IN))
1287 		target_execute_cmd(&ioctx->cmd);
1288 	else
1289 		pr_err("%s[%d]: wrong state = %d\n", __func__,
1290 		       __LINE__, ioctx->state);
1291 }
1292 
1293 /**
1294  * srpt_build_cmd_rsp - build a SRP_RSP response
1295  * @ch: RDMA channel through which the request has been received.
1296  * @ioctx: I/O context associated with the SRP_CMD request. The response will
1297  *   be built in the buffer ioctx->buf points at and hence this function will
1298  *   overwrite the request data.
1299  * @tag: tag of the request for which this response is being generated.
1300  * @status: value for the STATUS field of the SRP_RSP information unit.
1301  *
1302  * Returns the size in bytes of the SRP_RSP response.
1303  *
1304  * An SRP_RSP response contains a SCSI status or service response. See also
1305  * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1306  * response. See also SPC-2 for more information about sense data.
1307  */
1308 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1309 			      struct srpt_send_ioctx *ioctx, u64 tag,
1310 			      int status)
1311 {
1312 	struct srp_rsp *srp_rsp;
1313 	const u8 *sense_data;
1314 	int sense_data_len, max_sense_len;
1315 
1316 	/*
1317 	 * The lowest bit of all SAM-3 status codes is zero (see also
1318 	 * paragraph 5.3 in SAM-3).
1319 	 */
1320 	WARN_ON(status & 1);
1321 
1322 	srp_rsp = ioctx->ioctx.buf;
1323 	BUG_ON(!srp_rsp);
1324 
1325 	sense_data = ioctx->sense_data;
1326 	sense_data_len = ioctx->cmd.scsi_sense_length;
1327 	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1328 
1329 	memset(srp_rsp, 0, sizeof(*srp_rsp));
1330 	srp_rsp->opcode = SRP_RSP;
1331 	srp_rsp->req_lim_delta =
1332 		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1333 	srp_rsp->tag = tag;
1334 	srp_rsp->status = status;
1335 
1336 	if (sense_data_len) {
1337 		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
1338 		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1339 		if (sense_data_len > max_sense_len) {
1340 			pr_warn("truncated sense data from %d to %d"
1341 				" bytes\n", sense_data_len, max_sense_len);
1342 			sense_data_len = max_sense_len;
1343 		}
1344 
1345 		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1346 		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1347 		memcpy(srp_rsp + 1, sense_data, sense_data_len);
1348 	}
1349 
1350 	return sizeof(*srp_rsp) + sense_data_len;
1351 }
1352 
1353 /**
1354  * srpt_build_tskmgmt_rsp - build a task management response
1355  * @ch:       RDMA channel through which the request has been received.
1356  * @ioctx:    I/O context in which the SRP_RSP response will be built.
1357  * @rsp_code: RSP_CODE that will be stored in the response.
1358  * @tag:      Tag of the request for which this response is being generated.
1359  *
1360  * Returns the size in bytes of the SRP_RSP response.
1361  *
1362  * An SRP_RSP response contains a SCSI status or service response. See also
1363  * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1364  * response.
1365  */
1366 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1367 				  struct srpt_send_ioctx *ioctx,
1368 				  u8 rsp_code, u64 tag)
1369 {
1370 	struct srp_rsp *srp_rsp;
1371 	int resp_data_len;
1372 	int resp_len;
1373 
1374 	resp_data_len = 4;
1375 	resp_len = sizeof(*srp_rsp) + resp_data_len;
1376 
1377 	srp_rsp = ioctx->ioctx.buf;
1378 	BUG_ON(!srp_rsp);
1379 	memset(srp_rsp, 0, sizeof(*srp_rsp));
1380 
1381 	srp_rsp->opcode = SRP_RSP;
1382 	srp_rsp->req_lim_delta =
1383 		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1384 	srp_rsp->tag = tag;
1385 
1386 	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1387 	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1388 	srp_rsp->data[3] = rsp_code;
1389 
1390 	return resp_len;
1391 }
1392 
1393 static int srpt_check_stop_free(struct se_cmd *cmd)
1394 {
1395 	struct srpt_send_ioctx *ioctx = container_of(cmd,
1396 				struct srpt_send_ioctx, cmd);
1397 
1398 	return target_put_sess_cmd(&ioctx->cmd);
1399 }
1400 
1401 /**
1402  * srpt_handle_cmd - process a SRP_CMD information unit
1403  * @ch: SRPT RDMA channel.
1404  * @recv_ioctx: Receive I/O context.
1405  * @send_ioctx: Send I/O context.
1406  */
1407 static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
1408 			    struct srpt_recv_ioctx *recv_ioctx,
1409 			    struct srpt_send_ioctx *send_ioctx)
1410 {
1411 	struct se_cmd *cmd;
1412 	struct srp_cmd *srp_cmd;
1413 	struct scatterlist *sg = NULL;
1414 	unsigned sg_cnt = 0;
1415 	u64 data_len;
1416 	enum dma_data_direction dir;
1417 	int rc;
1418 
1419 	BUG_ON(!send_ioctx);
1420 
1421 	srp_cmd = recv_ioctx->ioctx.buf;
1422 	cmd = &send_ioctx->cmd;
1423 	cmd->tag = srp_cmd->tag;
1424 
1425 	switch (srp_cmd->task_attr) {
1426 	case SRP_CMD_SIMPLE_Q:
1427 		cmd->sam_task_attr = TCM_SIMPLE_TAG;
1428 		break;
1429 	case SRP_CMD_ORDERED_Q:
1430 	default:
1431 		cmd->sam_task_attr = TCM_ORDERED_TAG;
1432 		break;
1433 	case SRP_CMD_HEAD_OF_Q:
1434 		cmd->sam_task_attr = TCM_HEAD_TAG;
1435 		break;
1436 	case SRP_CMD_ACA:
1437 		cmd->sam_task_attr = TCM_ACA_TAG;
1438 		break;
1439 	}
1440 
1441 	rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
1442 			&data_len);
1443 	if (rc) {
1444 		if (rc != -EAGAIN) {
1445 			pr_err("0x%llx: parsing SRP descriptor table failed.\n",
1446 			       srp_cmd->tag);
1447 		}
1448 		goto release_ioctx;
1449 	}
1450 
1451 	rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
1452 			       &send_ioctx->sense_data[0],
1453 			       scsilun_to_int(&srp_cmd->lun), data_len,
1454 			       TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
1455 			       sg, sg_cnt, NULL, 0, NULL, 0);
1456 	if (rc != 0) {
1457 		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
1458 			 srp_cmd->tag);
1459 		goto release_ioctx;
1460 	}
1461 	return;
1462 
1463 release_ioctx:
1464 	send_ioctx->state = SRPT_STATE_DONE;
1465 	srpt_release_cmd(cmd);
1466 }
1467 
1468 static int srp_tmr_to_tcm(int fn)
1469 {
1470 	switch (fn) {
1471 	case SRP_TSK_ABORT_TASK:
1472 		return TMR_ABORT_TASK;
1473 	case SRP_TSK_ABORT_TASK_SET:
1474 		return TMR_ABORT_TASK_SET;
1475 	case SRP_TSK_CLEAR_TASK_SET:
1476 		return TMR_CLEAR_TASK_SET;
1477 	case SRP_TSK_LUN_RESET:
1478 		return TMR_LUN_RESET;
1479 	case SRP_TSK_CLEAR_ACA:
1480 		return TMR_CLEAR_ACA;
1481 	default:
1482 		return -1;
1483 	}
1484 }
1485 
1486 /**
1487  * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
1488  * @ch: SRPT RDMA channel.
1489  * @recv_ioctx: Receive I/O context.
1490  * @send_ioctx: Send I/O context.
1493  *
1494  * For more information about SRP_TSK_MGMT information units, see also section
1495  * 6.7 in the SRP r16a document.
1496  */
1497 static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1498 				 struct srpt_recv_ioctx *recv_ioctx,
1499 				 struct srpt_send_ioctx *send_ioctx)
1500 {
1501 	struct srp_tsk_mgmt *srp_tsk;
1502 	struct se_cmd *cmd;
1503 	struct se_session *sess = ch->sess;
1504 	int tcm_tmr;
1505 	int rc;
1506 
1507 	BUG_ON(!send_ioctx);
1508 
1509 	srp_tsk = recv_ioctx->ioctx.buf;
1510 	cmd = &send_ioctx->cmd;
1511 
1512 	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
1513 		 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
1514 		 ch->sess);
1515 
1516 	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
1517 	send_ioctx->cmd.tag = srp_tsk->tag;
1518 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
1519 	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
1520 			       scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
1521 			       GFP_KERNEL, srp_tsk->task_tag,
1522 			       TARGET_SCF_ACK_KREF);
1523 	if (rc != 0) {
1524 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
1525 		goto fail;
1526 	}
1527 	return;
1528 fail:
1529 	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
1530 }
1531 
1532 /**
1533  * srpt_handle_new_iu - process a newly received information unit
1534  * @ch:    RDMA channel through which the information unit has been received.
1535  * @recv_ioctx: Receive I/O context associated with the information unit.
1536  */
1537 static bool
1538 srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
1539 {
1540 	struct srpt_send_ioctx *send_ioctx = NULL;
1541 	struct srp_cmd *srp_cmd;
1542 	bool res = false;
1543 	u8 opcode;
1544 
1545 	BUG_ON(!ch);
1546 	BUG_ON(!recv_ioctx);
1547 
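	/*
	 * Information units received while the channel is still connecting
	 * are queued on the command wait list and handled later from
	 * srpt_process_wait_list().
	 */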
1548 	if (unlikely(ch->state == CH_CONNECTING))
1549 		goto push;
1550 
1551 	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1552 				   recv_ioctx->ioctx.dma, srp_max_req_size,
1553 				   DMA_FROM_DEVICE);
1554 
1555 	srp_cmd = recv_ioctx->ioctx.buf;
1556 	opcode = srp_cmd->opcode;
1557 	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
1558 		send_ioctx = srpt_get_send_ioctx(ch);
1559 		if (unlikely(!send_ioctx))
1560 			goto push;
1561 	}
1562 
1563 	if (!list_empty(&recv_ioctx->wait_list)) {
1564 		WARN_ON_ONCE(!ch->processing_wait_list);
1565 		list_del_init(&recv_ioctx->wait_list);
1566 	}
1567 
1568 	switch (opcode) {
1569 	case SRP_CMD:
1570 		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1571 		break;
1572 	case SRP_TSK_MGMT:
1573 		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1574 		break;
1575 	case SRP_I_LOGOUT:
1576 		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
1577 		break;
1578 	case SRP_CRED_RSP:
1579 		pr_debug("received SRP_CRED_RSP\n");
1580 		break;
1581 	case SRP_AER_RSP:
1582 		pr_debug("received SRP_AER_RSP\n");
1583 		break;
1584 	case SRP_RSP:
1585 		pr_err("Received SRP_RSP\n");
1586 		break;
1587 	default:
1588 		pr_err("received IU with unknown opcode 0x%x\n", opcode);
1589 		break;
1590 	}
1591 
1592 	srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
1593 	res = true;
1594 
1595 out:
1596 	return res;
1597 
1598 push:
1599 	if (list_empty(&recv_ioctx->wait_list)) {
1600 		WARN_ON_ONCE(ch->processing_wait_list);
1601 		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1602 	}
1603 	goto out;
1604 }
1605 
1606 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1607 {
1608 	struct srpt_rdma_ch *ch = cq->cq_context;
1609 	struct srpt_recv_ioctx *ioctx =
1610 		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
1611 
1612 	if (wc->status == IB_WC_SUCCESS) {
1613 		int req_lim;
1614 
1615 		req_lim = atomic_dec_return(&ch->req_lim);
1616 		if (unlikely(req_lim < 0))
1617 			pr_err("req_lim = %d < 0\n", req_lim);
1618 		srpt_handle_new_iu(ch, ioctx);
1619 	} else {
1620 		pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
1621 				    ioctx, wc->status);
1622 	}
1623 }
1624 
1625 /*
1626  * This function must be called from the context in which RDMA completions are
1627  * processed because it accesses the wait list without protection against
1628  * access from other threads.
1629  */
1630 static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
1631 {
1632 	struct srpt_recv_ioctx *recv_ioctx, *tmp;
1633 
1634 	WARN_ON_ONCE(ch->state == CH_CONNECTING);
1635 
1636 	if (list_empty(&ch->cmd_wait_list))
1637 		return;
1638 
1639 	WARN_ON_ONCE(ch->processing_wait_list);
1640 	ch->processing_wait_list = true;
1641 	list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
1642 				 wait_list) {
1643 		if (!srpt_handle_new_iu(ch, recv_ioctx))
1644 			break;
1645 	}
1646 	ch->processing_wait_list = false;
1647 }
1648 
1649 /**
1650  * srpt_send_done - send completion callback
1651  * @cq: Completion queue.
1652  * @wc: Work completion.
1653  *
1654  * Note: Although this has not yet been observed during tests, at least in
1655  * theory it is possible that the srpt_get_send_ioctx() call invoked by
1656  * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
1657  * value in each response is set to one, and it is possible that this response
1658  * makes the initiator send a new request before the send completion for that
1659  * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
1661  * if IB retransmission causes generation of the send completion to be
1662  * delayed. Incoming information units for which srpt_get_send_ioctx() fails
1663  * are queued on cmd_wait_list. The code below processes these delayed
1664  * requests one at a time.
1665  */
1666 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
1667 {
1668 	struct srpt_rdma_ch *ch = cq->cq_context;
1669 	struct srpt_send_ioctx *ioctx =
1670 		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
1671 	enum srpt_command_state state;
1672 
1673 	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1674 
1675 	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
1676 		state != SRPT_STATE_MGMT_RSP_SENT);
1677 
1678 	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
1679 
1680 	if (wc->status != IB_WC_SUCCESS)
1681 		pr_info("sending response for ioctx 0x%p failed"
1682 			" with status %d\n", ioctx, wc->status);
1683 
1684 	if (state != SRPT_STATE_DONE) {
1685 		transport_generic_free_cmd(&ioctx->cmd, 0);
1686 	} else {
1687 		pr_err("IB completion has been received too late for"
1688 		       " wr_id = %u.\n", ioctx->ioctx.index);
1689 	}
1690 
1691 	srpt_process_wait_list(ch);
1692 }
1693 
1694 /**
1695  * srpt_create_ch_ib - create receive and send completion queues
1696  * @ch: SRPT RDMA channel.
1697  */
1698 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1699 {
1700 	struct ib_qp_init_attr *qp_init;
1701 	struct srpt_port *sport = ch->sport;
1702 	struct srpt_device *sdev = sport->sdev;
1703 	const struct ib_device_attr *attrs = &sdev->device->attrs;
1704 	int sq_size = sport->port_attrib.srp_sq_size;
1705 	int i, ret;
1706 
1707 	WARN_ON(ch->rq_size < 1);
1708 
1709 	ret = -ENOMEM;
1710 	qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
1711 	if (!qp_init)
1712 		goto out;
1713 
1714 retry:
1715 	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
1716 			0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
1717 	if (IS_ERR(ch->cq)) {
1718 		ret = PTR_ERR(ch->cq);
1719 		pr_err("failed to create CQ cqe= %d ret= %d\n",
1720 		       ch->rq_size + sq_size, ret);
1721 		goto out;
1722 	}
1723 
1724 	qp_init->qp_context = (void *)ch;
1725 	qp_init->event_handler
1726 		= (void(*)(struct ib_event *, void*))srpt_qp_event;
1727 	qp_init->send_cq = ch->cq;
1728 	qp_init->recv_cq = ch->cq;
1729 	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1730 	qp_init->qp_type = IB_QPT_RC;
1731 	/*
1732 	 * We divide up our send queue size into half SEND WRs to send the
1733 	 * completions, and half R/W contexts to actually do the RDMA
1734 	 * READ/WRITE transfers.  Note that we need to allocate CQ slots for
 * both, as RDMA contexts will also post completions for the
1736 	 * RDMA READ case.
1737 	 */
1738 	qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
1739 	qp_init->cap.max_rdma_ctxs = sq_size / 2;
1740 	qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
1741 	qp_init->port_num = ch->sport->port;
1742 	if (sdev->use_srq) {
1743 		qp_init->srq = sdev->srq;
1744 	} else {
1745 		qp_init->cap.max_recv_wr = ch->rq_size;
1746 		qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;
1747 	}
1748 
1749 	ch->qp = ib_create_qp(sdev->pd, qp_init);
1750 	if (IS_ERR(ch->qp)) {
1751 		ret = PTR_ERR(ch->qp);
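		/*
		 * If the HCA cannot allocate a QP of the requested size,
		 * retry with a progressively smaller send queue.
		 */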
1752 		if (ret == -ENOMEM) {
1753 			sq_size /= 2;
1754 			if (sq_size >= MIN_SRPT_SQ_SIZE) {
1755 				ib_destroy_cq(ch->cq);
1756 				goto retry;
1757 			}
1758 		}
1759 		pr_err("failed to create_qp ret= %d\n", ret);
1760 		goto err_destroy_cq;
1761 	}
1762 
1763 	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
1764 
	pr_debug("%s: max_cqe= %d max_sge= %d max_send_wr= %d ch= %p\n",
1766 		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1767 		 qp_init->cap.max_send_wr, ch);
1768 
1769 	ret = srpt_init_ch_qp(ch, ch->qp);
1770 	if (ret)
1771 		goto err_destroy_qp;
1772 
1773 	if (!sdev->use_srq)
1774 		for (i = 0; i < ch->rq_size; i++)
1775 			srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
1776 
1777 out:
1778 	kfree(qp_init);
1779 	return ret;
1780 
1781 err_destroy_qp:
1782 	ib_destroy_qp(ch->qp);
1783 err_destroy_cq:
1784 	ib_free_cq(ch->cq);
1785 	goto out;
1786 }
1787 
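/**
 * srpt_destroy_ch_ib - release a channel's queue pair and completion queue
 * @ch: SRPT RDMA channel.
 */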
1788 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
1789 {
1790 	ib_destroy_qp(ch->qp);
1791 	ib_free_cq(ch->cq);
1792 }
1793 
1794 /**
 * srpt_close_ch - close an RDMA channel
1796  * @ch: SRPT RDMA channel.
1797  *
1798  * Make sure all resources associated with the channel will be deallocated at
1799  * an appropriate time.
1800  *
1801  * Returns true if and only if the channel state has been modified into
1802  * CH_DRAINING.
1803  */
1804 static bool srpt_close_ch(struct srpt_rdma_ch *ch)
1805 {
1806 	int ret;
1807 
1808 	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
1809 		pr_debug("%s-%d: already closed\n", ch->sess_name,
1810 			 ch->qp->qp_num);
1811 		return false;
1812 	}
1813 
1814 	kref_get(&ch->kref);
1815 
1816 	ret = srpt_ch_qp_err(ch);
1817 	if (ret < 0)
1818 		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
1819 		       ch->sess_name, ch->qp->qp_num, ret);
1820 
1821 	ret = srpt_zerolength_write(ch);
1822 	if (ret < 0) {
1823 		pr_err("%s-%d: queuing zero-length write failed: %d\n",
1824 		       ch->sess_name, ch->qp->qp_num, ret);
1825 		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
1826 			schedule_work(&ch->release_work);
1827 		else
1828 			WARN_ON_ONCE(true);
1829 	}
1830 
1831 	kref_put(&ch->kref, srpt_free_ch);
1832 
1833 	return true;
1834 }
1835 
1836 /*
1837  * Change the channel state into CH_DISCONNECTING. If a channel has not yet
1838  * reached the connected state, close it. If a channel is in the connected
1839  * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
1840  * the responsibility of the caller to ensure that this function is not
1841  * invoked concurrently with the code that accepts a connection. This means
1842  * that this function must either be invoked from inside a CM callback
1843  * function or that it must be invoked with the srpt_port.mutex held.
1844  */
1845 static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
1846 {
1847 	int ret;
1848 
1849 	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
1850 		return -ENOTCONN;
1851 
1852 	ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
1853 	if (ret < 0)
1854 		ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
1855 
1856 	if (ret < 0 && srpt_close_ch(ch))
1857 		ret = 0;
1858 
1859 	return ret;
1860 }
1861 
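/*
 * Returns true if and only if @ch is no longer associated with any of the
 * nexuses of @sport, i.e. after srpt_release_channel_work() has removed it
 * from the nexus channel list.
 */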
1862 static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
1863 {
1864 	struct srpt_nexus *nexus;
1865 	struct srpt_rdma_ch *ch2;
1866 	bool res = true;
1867 
1868 	rcu_read_lock();
1869 	list_for_each_entry(nexus, &sport->nexus_list, entry) {
1870 		list_for_each_entry(ch2, &nexus->ch_list, list) {
1871 			if (ch2 == ch) {
1872 				res = false;
1873 				goto done;
1874 			}
1875 		}
1876 	}
1877 done:
1878 	rcu_read_unlock();
1879 
1880 	return res;
1881 }
1882 
/* Send DREQ and wait until the channel has been released. */
1884 static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
1885 {
1886 	struct srpt_port *sport = ch->sport;
1887 
1888 	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
1889 		 ch->state);
1890 
1891 	mutex_lock(&sport->mutex);
1892 	srpt_disconnect_ch(ch);
1893 	mutex_unlock(&sport->mutex);
1894 
1895 	while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
1896 				  5 * HZ) == 0)
1897 		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
			ch->sess_name, ch->qp->qp_num, ch->state);
}
1901 
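/*
 * Disconnect and close all channels associated with @sport. The caller must
 * hold sport->mutex.
 */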
1902 static void __srpt_close_all_ch(struct srpt_port *sport)
1903 {
1904 	struct srpt_nexus *nexus;
1905 	struct srpt_rdma_ch *ch;
1906 
1907 	lockdep_assert_held(&sport->mutex);
1908 
1909 	list_for_each_entry(nexus, &sport->nexus_list, entry) {
1910 		list_for_each_entry(ch, &nexus->ch_list, list) {
1911 			if (srpt_disconnect_ch(ch) >= 0)
1912 				pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
1913 					ch->sess_name, ch->qp->qp_num,
1914 					sport->sdev->device->name, sport->port);
1915 			srpt_close_ch(ch);
1916 		}
1917 	}
1918 }
1919 
1920 /*
1921  * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
1922  * it does not yet exist.
1923  */
1924 static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
1925 					 const u8 i_port_id[16],
1926 					 const u8 t_port_id[16])
1927 {
1928 	struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
1929 
1930 	for (;;) {
1931 		mutex_lock(&sport->mutex);
1932 		list_for_each_entry(n, &sport->nexus_list, entry) {
1933 			if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
1934 			    memcmp(n->t_port_id, t_port_id, 16) == 0) {
1935 				nexus = n;
1936 				break;
1937 			}
1938 		}
1939 		if (!nexus && tmp_nexus) {
1940 			list_add_tail_rcu(&tmp_nexus->entry,
1941 					  &sport->nexus_list);
1942 			swap(nexus, tmp_nexus);
1943 		}
1944 		mutex_unlock(&sport->mutex);
1945 
1946 		if (nexus)
1947 			break;
1948 		tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
1949 		if (!tmp_nexus) {
1950 			nexus = ERR_PTR(-ENOMEM);
1951 			break;
1952 		}
1953 		INIT_LIST_HEAD(&tmp_nexus->ch_list);
1954 		memcpy(tmp_nexus->i_port_id, i_port_id, 16);
1955 		memcpy(tmp_nexus->t_port_id, t_port_id, 16);
1956 	}
1957 
1958 	kfree(tmp_nexus);
1959 
1960 	return nexus;
1961 }
1962 
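/*
 * Enable or disable logins for @sport. Disabling a port closes all channels
 * associated with that port.
 */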
1963 static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
1964 	__must_hold(&sport->mutex)
1965 {
1966 	lockdep_assert_held(&sport->mutex);
1967 
1968 	if (sport->enabled == enabled)
1969 		return;
1970 	sport->enabled = enabled;
1971 	if (!enabled)
1972 		__srpt_close_all_ch(sport);
1973 }
1974 
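/* kref release function - frees a channel after an RCU grace period. */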
1975 static void srpt_free_ch(struct kref *kref)
1976 {
1977 	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
1978 
1979 	kfree_rcu(ch, rcu);
1980 }
1981 
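/*
 * Wait until all outstanding SCSI commands for a channel have finished,
 * deregister the TCM session, destroy the IB resources of the channel and
 * free the I/O context rings. Runs from the ch->release_work work item.
 */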
1982 static void srpt_release_channel_work(struct work_struct *w)
1983 {
1984 	struct srpt_rdma_ch *ch;
1985 	struct srpt_device *sdev;
1986 	struct srpt_port *sport;
1987 	struct se_session *se_sess;
1988 
1989 	ch = container_of(w, struct srpt_rdma_ch, release_work);
1990 	pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
1991 
1992 	sdev = ch->sport->sdev;
1993 	BUG_ON(!sdev);
1994 
1995 	se_sess = ch->sess;
1996 	BUG_ON(!se_sess);
1997 
1998 	target_sess_cmd_list_set_waiting(se_sess);
1999 	target_wait_for_sess_cmds(se_sess);
2000 
2001 	transport_deregister_session_configfs(se_sess);
2002 	transport_deregister_session(se_sess);
2003 	ch->sess = NULL;
2004 
2005 	ib_destroy_cm_id(ch->ib_cm.cm_id);
2006 
2007 	srpt_destroy_ch_ib(ch);
2008 
2009 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2010 			     ch->sport->sdev, ch->rq_size,
2011 			     ch->max_rsp_size, DMA_TO_DEVICE);
2012 
2013 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2014 			     sdev, ch->rq_size,
2015 			     srp_max_req_size, DMA_FROM_DEVICE);
2016 
2017 	sport = ch->sport;
2018 	mutex_lock(&sport->mutex);
2019 	list_del_rcu(&ch->list);
2020 	mutex_unlock(&sport->mutex);
2021 
2022 	wake_up(&sport->ch_releaseQ);
2023 
2024 	kref_put(&ch->kref, srpt_free_ch);
2025 }
2026 
2027 /**
2028  * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
2029  * @cm_id: IB/CM connection identifier.
2030  * @port_num: Port through which the IB/CM REQ message was received.
2031  * @pkey: P_Key of the incoming connection.
2032  * @req: SRP login request.
2033  * @src_addr: GID of the port that submitted the login request.
2034  *
2035  * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
2037  */
2038 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2039 			    u8 port_num, __be16 pkey,
2040 			    const struct srp_login_req *req,
2041 			    const char *src_addr)
2042 {
2043 	struct srpt_device *sdev = cm_id->context;
2044 	struct srpt_port *sport = &sdev->port[port_num - 1];
2045 	struct srpt_nexus *nexus;
2046 	struct srp_login_rsp *rsp = NULL;
2047 	struct srp_login_rej *rej = NULL;
2048 	struct ib_cm_rep_param *rep_param = NULL;
2049 	struct srpt_rdma_ch *ch;
2050 	char i_port_id[36];
2051 	u32 it_iu_len;
2052 	int i, ret;
2053 
2054 	WARN_ON_ONCE(irqs_disabled());
2055 
2056 	if (WARN_ON(!sdev || !req))
2057 		return -EINVAL;
2058 
2059 	it_iu_len = be32_to_cpu(req->req_it_iu_len);
2060 
2061 	pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
2062 		req->initiator_port_id, req->target_port_id, it_iu_len,
2063 		port_num, &sport->gid, be16_to_cpu(pkey));
2064 
2065 	nexus = srpt_get_nexus(sport, req->initiator_port_id,
2066 			       req->target_port_id);
2067 	if (IS_ERR(nexus)) {
2068 		ret = PTR_ERR(nexus);
2069 		goto out;
2070 	}
2071 
2072 	ret = -ENOMEM;
2073 	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
2074 	rej = kzalloc(sizeof(*rej), GFP_KERNEL);
2075 	rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
2076 	if (!rsp || !rej || !rep_param)
2077 		goto out;
2078 
2079 	ret = -EINVAL;
2080 	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2081 		rej->reason = cpu_to_be32(
2082 				SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2083 		pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
2084 		       it_iu_len, 64, srp_max_req_size);
2085 		goto reject;
2086 	}
2087 
2088 	if (!sport->enabled) {
2089 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2090 		pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
2091 			sport->sdev->device->name, port_num);
2092 		goto reject;
2093 	}
2094 
	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) ||
	    *(__be64 *)(req->target_port_id + 8) !=
	    cpu_to_be64(srpt_service_guid)) {
2098 		rej->reason = cpu_to_be32(
2099 				SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2100 		pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
2101 		goto reject;
2102 	}
2103 
2104 	ret = -ENOMEM;
2105 	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
2106 	if (!ch) {
2107 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2108 		pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
2109 		goto reject;
2110 	}
2111 
2112 	kref_init(&ch->kref);
2113 	ch->pkey = be16_to_cpu(pkey);
2114 	ch->nexus = nexus;
2115 	ch->zw_cqe.done = srpt_zerolength_write_done;
2116 	INIT_WORK(&ch->release_work, srpt_release_channel_work);
2117 	ch->sport = sport;
2118 	ch->ib_cm.cm_id = cm_id;
2119 	cm_id->context = ch;
	/*
	 * ch->rq_size should be at least as large as the initiator queue
	 * depth so that the initiator driver never has to report QUEUE_FULL
	 * to the SCSI mid-layer.
	 */
2125 	ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
2126 	spin_lock_init(&ch->spinlock);
2127 	ch->state = CH_CONNECTING;
2128 	INIT_LIST_HEAD(&ch->cmd_wait_list);
2129 	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2130 
2131 	ch->ioctx_ring = (struct srpt_send_ioctx **)
2132 		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2133 				      sizeof(*ch->ioctx_ring[0]),
2134 				      ch->max_rsp_size, DMA_TO_DEVICE);
2135 	if (!ch->ioctx_ring) {
2136 		pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
2137 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2138 		goto free_ch;
2139 	}
2140 
2141 	INIT_LIST_HEAD(&ch->free_list);
2142 	for (i = 0; i < ch->rq_size; i++) {
2143 		ch->ioctx_ring[i]->ch = ch;
2144 		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2145 	}
2146 	if (!sdev->use_srq) {
2147 		ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
2148 			srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2149 					      sizeof(*ch->ioctx_recv_ring[0]),
2150 					      srp_max_req_size,
2151 					      DMA_FROM_DEVICE);
2152 		if (!ch->ioctx_recv_ring) {
2153 			pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
2154 			rej->reason =
2155 			    cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2156 			goto free_ring;
2157 		}
2158 		for (i = 0; i < ch->rq_size; i++)
2159 			INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
2160 	}
2161 
2162 	ret = srpt_create_ch_ib(ch);
2163 	if (ret) {
2164 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2165 		pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
2166 		goto free_recv_ring;
2167 	}
2168 
2169 	strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
2170 	snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
2171 			be64_to_cpu(*(__be64 *)nexus->i_port_id),
2172 			be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
2173 
2174 	pr_debug("registering session %s\n", ch->sess_name);
2175 
2176 	if (sport->port_guid_tpg.se_tpg_wwn)
2177 		ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
2178 						TARGET_PROT_NORMAL,
2179 						ch->sess_name, ch, NULL);
2180 	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2181 		ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
2182 					TARGET_PROT_NORMAL, i_port_id, ch,
2183 					NULL);
2184 	/* Retry without leading "0x" */
2185 	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2186 		ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
2187 						TARGET_PROT_NORMAL,
2188 						i_port_id + 2, ch, NULL);
	if (IS_ERR_OR_NULL(ch->sess)) {
		/* ch->sess is NULL if no TPG exists for this port. */
		ret = ch->sess ? PTR_ERR(ch->sess) : -ENOMEM;
2191 		pr_info("Rejected login for initiator %s: ret = %d.\n",
2192 			ch->sess_name, ret);
2193 		rej->reason = cpu_to_be32(ret == -ENOMEM ?
2194 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
2195 				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2196 		goto reject;
2197 	}
2198 
2199 	mutex_lock(&sport->mutex);
2200 
2201 	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2202 		struct srpt_rdma_ch *ch2;
2203 
2204 		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2205 
2206 		list_for_each_entry(ch2, &nexus->ch_list, list) {
2207 			if (srpt_disconnect_ch(ch2) < 0)
2208 				continue;
2209 			pr_info("Relogin - closed existing channel %s\n",
2210 				ch2->sess_name);
2211 			rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2212 		}
2213 	} else {
2214 		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2215 	}
2216 
2217 	list_add_tail_rcu(&ch->list, &nexus->ch_list);
2218 
2219 	if (!sport->enabled) {
2220 		rej->reason = cpu_to_be32(
2221 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2222 		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
2223 			sdev->device->name, port_num);
2224 		mutex_unlock(&sport->mutex);
2225 		goto reject;
2226 	}
2227 
2228 	mutex_unlock(&sport->mutex);
2229 
2230 	ret = srpt_ch_qp_rtr(ch, ch->qp);
2231 	if (ret) {
2232 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2233 		pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
2234 		       ret);
2235 		goto destroy_ib;
2236 	}
2237 
2238 	pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
2239 		 ch->sess_name, ch);
2240 
2241 	/* create srp_login_response */
2242 	rsp->opcode = SRP_LOGIN_RSP;
2243 	rsp->tag = req->tag;
2244 	rsp->max_it_iu_len = req->req_it_iu_len;
2245 	rsp->max_ti_iu_len = req->req_it_iu_len;
2246 	ch->max_ti_iu_len = it_iu_len;
2247 	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
2248 				   SRP_BUF_FORMAT_INDIRECT);
2249 	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2250 	atomic_set(&ch->req_lim, ch->rq_size);
2251 	atomic_set(&ch->req_lim_delta, 0);
2252 
2253 	/* create cm reply */
2254 	rep_param->qp_num = ch->qp->qp_num;
2255 	rep_param->private_data = (void *)rsp;
2256 	rep_param->private_data_len = sizeof(*rsp);
2257 	rep_param->rnr_retry_count = 7;
2258 	rep_param->flow_control = 1;
2259 	rep_param->failover_accepted = 0;
2260 	rep_param->srq = 1;
2261 	rep_param->responder_resources = 4;
2262 	rep_param->initiator_depth = 4;
2263 
2264 	/*
2265 	 * Hold the sport mutex while accepting a connection to avoid that
2266 	 * srpt_disconnect_ch() is invoked concurrently with this code.
2267 	 */
2268 	mutex_lock(&sport->mutex);
2269 	if (sport->enabled && ch->state == CH_CONNECTING)
2270 		ret = ib_send_cm_rep(cm_id, rep_param);
2271 	else
2272 		ret = -EINVAL;
2273 	mutex_unlock(&sport->mutex);
2274 
2275 	switch (ret) {
2276 	case 0:
2277 		break;
2278 	case -EINVAL:
2279 		goto reject;
2280 	default:
2281 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2282 		pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
2283 		       ret);
2284 		goto reject;
2285 	}
2286 
2287 	goto out;
2288 
2289 destroy_ib:
2290 	srpt_destroy_ch_ib(ch);
2291 
2292 free_recv_ring:
2293 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2294 			     ch->sport->sdev, ch->rq_size,
2295 			     srp_max_req_size, DMA_FROM_DEVICE);
2296 
2297 free_ring:
2298 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2299 			     ch->sport->sdev, ch->rq_size,
2300 			     ch->max_rsp_size, DMA_TO_DEVICE);
2301 free_ch:
2302 	cm_id->context = NULL;
2303 	kfree(ch);
2304 	ch = NULL;
2305 
2306 	WARN_ON_ONCE(ret == 0);
2307 
2308 reject:
2309 	pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
2310 	rej->opcode = SRP_LOGIN_REJ;
2311 	rej->tag = req->tag;
2312 	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
2313 				   SRP_BUF_FORMAT_INDIRECT);
2314 
2315 	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2316 			     (void *)rej, sizeof(*rej));
2317 
2318 out:
2319 	kfree(rep_param);
2320 	kfree(rsp);
2321 	kfree(rej);
2322 
2323 	return ret;
2324 }
2325 
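/*
 * IB/CM REQ callback: format the GUID of the port that submitted the login
 * request as an ASCII string and forward the request to srpt_cm_req_recv().
 */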
2326 static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
2327 			       struct ib_cm_req_event_param *param,
2328 			       void *private_data)
2329 {
2330 	char sguid[40];
2331 
2332 	srpt_format_guid(sguid, sizeof(sguid),
2333 			 &param->primary_path->dgid.global.interface_id);
2334 
2335 	return srpt_cm_req_recv(cm_id, param->port, param->primary_path->pkey,
2336 				private_data, sguid);
2337 }
2338 
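/* Log the reason and, if present, the private data of a received CM REJ. */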
2339 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
2340 			     enum ib_cm_rej_reason reason,
2341 			     const u8 *private_data,
2342 			     u8 private_data_len)
2343 {
2344 	char *priv = NULL;
2345 	int i;
2346 
2347 	if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
2348 						GFP_KERNEL))) {
2349 		for (i = 0; i < private_data_len; i++)
2350 			sprintf(priv + 3 * i, " %02x", private_data[i]);
2351 	}
2352 	pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
2353 		ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
2354 		"; private data" : "", priv ? priv : " (?)");
2355 	kfree(priv);
2356 }
2357 
2358 /**
2359  * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
2360  * @ch: SRPT RDMA channel.
2361  *
2362  * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2363  * and that the recipient may begin transmitting (RTU = ready to use).
2364  */
2365 static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
2366 {
2367 	int ret;
2368 
2369 	ret = srpt_ch_qp_rts(ch, ch->qp);
2370 	if (ret < 0) {
2371 		pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
2372 		       ch->qp->qp_num);
2373 		srpt_close_ch(ch);
2374 		return;
2375 	}
2376 
	/*
	 * Note: calling srpt_close_ch() if the transition to the LIVE state
	 * fails is not necessary since a failed transition means that
	 * srpt_close_ch() has already been invoked from another thread.
	 */
2382 	if (!srpt_set_ch_state(ch, CH_LIVE)) {
2383 		pr_err("%s-%d: channel transition to LIVE state failed\n",
2384 		       ch->sess_name, ch->qp->qp_num);
2385 		return;
2386 	}
2387 
2388 	/* Trigger wait list processing. */
2389 	ret = srpt_zerolength_write(ch);
2390 	WARN_ONCE(ret < 0, "%d\n", ret);
2391 }
2392 
2393 /**
2394  * srpt_cm_handler - IB connection manager callback function
2395  * @cm_id: IB/CM connection identifier.
2396  * @event: IB/CM event.
2397  *
 * A non-zero return value will cause the caller to destroy the CM ID.
 *
 * Note: srpt_cm_handler() must only return a non-zero value if
 * srpt_cm_req_recv() failed to transfer ownership of the cm_id to a channel.
 * Returning a non-zero value in any other case will trigger a race with the
 * ib_destroy_cm_id() call in srpt_release_channel_work().
2404  */
2405 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2406 {
2407 	struct srpt_rdma_ch *ch = cm_id->context;
2408 	int ret;
2409 
2410 	ret = 0;
2411 	switch (event->event) {
2412 	case IB_CM_REQ_RECEIVED:
2413 		ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
2414 					  event->private_data);
2415 		break;
2416 	case IB_CM_REJ_RECEIVED:
2417 		srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
2418 				 event->private_data,
2419 				 IB_CM_REJ_PRIVATE_DATA_SIZE);
2420 		break;
2421 	case IB_CM_RTU_RECEIVED:
2422 	case IB_CM_USER_ESTABLISHED:
2423 		srpt_cm_rtu_recv(ch);
2424 		break;
2425 	case IB_CM_DREQ_RECEIVED:
2426 		srpt_disconnect_ch(ch);
2427 		break;
2428 	case IB_CM_DREP_RECEIVED:
2429 		pr_info("Received CM DREP message for ch %s-%d.\n",
2430 			ch->sess_name, ch->qp->qp_num);
2431 		srpt_close_ch(ch);
2432 		break;
2433 	case IB_CM_TIMEWAIT_EXIT:
2434 		pr_info("Received CM TimeWait exit for ch %s-%d.\n",
2435 			ch->sess_name, ch->qp->qp_num);
2436 		srpt_close_ch(ch);
2437 		break;
2438 	case IB_CM_REP_ERROR:
2439 		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2440 			ch->qp->qp_num);
2441 		break;
2442 	case IB_CM_DREQ_ERROR:
2443 		pr_info("Received CM DREQ ERROR event.\n");
2444 		break;
2445 	case IB_CM_MRA_RECEIVED:
2446 		pr_info("Received CM MRA event\n");
2447 		break;
2448 	default:
2449 		pr_err("received unrecognized CM event %d\n", event->event);
2450 		break;
2451 	}
2452 
2453 	return ret;
2454 }
2455 
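/*
 * Returns whether the data for a SCSI WRITE command is still being fetched
 * from the initiator, i.e. whether the command state is
 * SRPT_STATE_NEED_DATA.
 */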
2456 static int srpt_write_pending_status(struct se_cmd *se_cmd)
2457 {
2458 	struct srpt_send_ioctx *ioctx;
2459 
2460 	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2461 	return ioctx->state == SRPT_STATE_NEED_DATA;
2462 }
2463 
2464 /*
2465  * srpt_write_pending - Start data transfer from initiator to target (write).
2466  */
2467 static int srpt_write_pending(struct se_cmd *se_cmd)
2468 {
2469 	struct srpt_send_ioctx *ioctx =
2470 		container_of(se_cmd, struct srpt_send_ioctx, cmd);
2471 	struct srpt_rdma_ch *ch = ioctx->ch;
2472 	struct ib_send_wr *first_wr = NULL, *bad_wr;
2473 	struct ib_cqe *cqe = &ioctx->rdma_cqe;
2474 	enum srpt_command_state new_state;
2475 	int ret, i;
2476 
2477 	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2478 	WARN_ON(new_state == SRPT_STATE_DONE);
2479 
2480 	if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
2481 		pr_warn("%s: IB send queue full (needed %d)\n",
2482 				__func__, ioctx->n_rdma);
2483 		ret = -ENOMEM;
2484 		goto out_undo;
2485 	}
2486 
2487 	cqe->done = srpt_rdma_read_done;
2488 	for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2489 		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2490 
2491 		first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
2492 				cqe, first_wr);
2493 		cqe = NULL;
2494 	}
2495 
2496 	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
2497 	if (ret) {
2498 		pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
2499 			 __func__, ret, ioctx->n_rdma,
2500 			 atomic_read(&ch->sq_wr_avail));
2501 		goto out_undo;
2502 	}
2503 
2504 	return 0;
2505 out_undo:
2506 	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
2507 	return ret;
2508 }
2509 
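/*
 * Translate a TCM task management response code into the corresponding SRP
 * task management status code.
 */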
2510 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2511 {
2512 	switch (tcm_mgmt_status) {
2513 	case TMR_FUNCTION_COMPLETE:
2514 		return SRP_TSK_MGMT_SUCCESS;
2515 	case TMR_FUNCTION_REJECTED:
2516 		return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2517 	}
2518 	return SRP_TSK_MGMT_FAILED;
2519 }
2520 
2521 /**
2522  * srpt_queue_response - transmit the response to a SCSI command
2523  * @cmd: SCSI target command.
2524  *
2525  * Callback function called by the TCM core. Must not block since it can be
 * invoked in the context of the IB completion handler.
2527  */
2528 static void srpt_queue_response(struct se_cmd *cmd)
2529 {
2530 	struct srpt_send_ioctx *ioctx =
2531 		container_of(cmd, struct srpt_send_ioctx, cmd);
2532 	struct srpt_rdma_ch *ch = ioctx->ch;
2533 	struct srpt_device *sdev = ch->sport->sdev;
2534 	struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
2535 	struct ib_sge sge;
2536 	enum srpt_command_state state;
2537 	int resp_len, ret, i;
2538 	u8 srp_tm_status;
2539 
2540 	BUG_ON(!ch);
2541 
2542 	state = ioctx->state;
2543 	switch (state) {
2544 	case SRPT_STATE_NEW:
2545 	case SRPT_STATE_DATA_IN:
2546 		ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2547 		break;
2548 	case SRPT_STATE_MGMT:
2549 		ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2550 		break;
2551 	default:
2552 		WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2553 			ch, ioctx->ioctx.index, ioctx->state);
2554 		break;
2555 	}
2556 
2557 	if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
2558 		return;
2559 
2560 	/* For read commands, transfer the data to the initiator. */
2561 	if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
2562 	    ioctx->cmd.data_length &&
2563 	    !ioctx->queue_status_only) {
2564 		for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2565 			struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2566 
2567 			first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
2568 					ch->sport->port, NULL, first_wr);
2569 		}
2570 	}
2571 
	if (state != SRPT_STATE_MGMT) {
		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
					      cmd->scsi_status);
	} else {
		srp_tm_status =
			tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
						  ioctx->cmd.tag);
	}
2581 
2582 	atomic_inc(&ch->req_lim);
2583 
2584 	if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
2585 			&ch->sq_wr_avail) < 0)) {
2586 		pr_warn("%s: IB send queue full (needed %d)\n",
2587 				__func__, ioctx->n_rdma);
2588 		ret = -ENOMEM;
2589 		goto out;
2590 	}
2591 
2592 	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2593 				      DMA_TO_DEVICE);
2594 
2595 	sge.addr = ioctx->ioctx.dma;
2596 	sge.length = resp_len;
2597 	sge.lkey = sdev->lkey;
2598 
2599 	ioctx->ioctx.cqe.done = srpt_send_done;
2600 	send_wr.next = NULL;
2601 	send_wr.wr_cqe = &ioctx->ioctx.cqe;
2602 	send_wr.sg_list = &sge;
2603 	send_wr.num_sge = 1;
2604 	send_wr.opcode = IB_WR_SEND;
2605 	send_wr.send_flags = IB_SEND_SIGNALED;
2606 
2607 	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
2608 	if (ret < 0) {
2609 		pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
2610 			__func__, ioctx->cmd.tag, ret);
2611 		goto out;
2612 	}
2613 
2614 	return;
2615 
2616 out:
2617 	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
2618 	atomic_dec(&ch->req_lim);
2619 	srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2620 	target_put_sess_cmd(&ioctx->cmd);
2621 }
2622 
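/*
 * TCM response callbacks: srpt_queue_data_in(), srpt_queue_tm_rsp() and
 * srpt_queue_status() all delegate to srpt_queue_response() while
 * srpt_aborted_task() intentionally does nothing.
 */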
2623 static int srpt_queue_data_in(struct se_cmd *cmd)
2624 {
2625 	srpt_queue_response(cmd);
2626 	return 0;
2627 }
2628 
2629 static void srpt_queue_tm_rsp(struct se_cmd *cmd)
2630 {
2631 	srpt_queue_response(cmd);
2632 }
2633 
2634 static void srpt_aborted_task(struct se_cmd *cmd)
2635 {
2636 }
2637 
2638 static int srpt_queue_status(struct se_cmd *cmd)
2639 {
2640 	struct srpt_send_ioctx *ioctx;
2641 
2642 	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2643 	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2644 	if (cmd->se_cmd_flags &
2645 	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
2646 		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
2647 	ioctx->queue_status_only = true;
2648 	srpt_queue_response(cmd);
2649 	return 0;
2650 }
2651 
2652 static void srpt_refresh_port_work(struct work_struct *work)
2653 {
2654 	struct srpt_port *sport = container_of(work, struct srpt_port, work);
2655 
2656 	srpt_refresh_port(sport);
2657 }
2658 
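/*
 * Returns true if and only if none of the nexuses of @sport has any channel
 * associated with it.
 */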
2659 static bool srpt_ch_list_empty(struct srpt_port *sport)
2660 {
2661 	struct srpt_nexus *nexus;
2662 	bool res = true;
2663 
2664 	rcu_read_lock();
2665 	list_for_each_entry(nexus, &sport->nexus_list, entry)
2666 		if (!list_empty(&nexus->ch_list))
2667 			res = false;
2668 	rcu_read_unlock();
2669 
2670 	return res;
2671 }
2672 
2673 /**
2674  * srpt_release_sport - disable login and wait for associated channels
2675  * @sport: SRPT HCA port.
2676  */
2677 static int srpt_release_sport(struct srpt_port *sport)
2678 {
2679 	struct srpt_nexus *nexus, *next_n;
2680 	struct srpt_rdma_ch *ch;
2681 
2682 	WARN_ON_ONCE(irqs_disabled());
2683 
2684 	mutex_lock(&sport->mutex);
2685 	srpt_set_enabled(sport, false);
2686 	mutex_unlock(&sport->mutex);
2687 
2688 	while (wait_event_timeout(sport->ch_releaseQ,
2689 				  srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
2690 		pr_info("%s_%d: waiting for session unregistration ...\n",
2691 			sport->sdev->device->name, sport->port);
2692 		rcu_read_lock();
2693 		list_for_each_entry(nexus, &sport->nexus_list, entry) {
2694 			list_for_each_entry(ch, &nexus->ch_list, list) {
2695 				pr_info("%s-%d: state %s\n",
2696 					ch->sess_name, ch->qp->qp_num,
2697 					get_ch_state_name(ch->state));
2698 			}
2699 		}
2700 		rcu_read_unlock();
2701 	}
2702 
2703 	mutex_lock(&sport->mutex);
2704 	list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
2705 		list_del(&nexus->entry);
2706 		kfree_rcu(nexus, rcu);
2707 	}
2708 	mutex_unlock(&sport->mutex);
2709 
2710 	return 0;
2711 }
2712 
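/*
 * Look up the se_wwn whose port GUID or port GID string matches @name. The
 * caller must hold srpt_dev_lock.
 */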
2713 static struct se_wwn *__srpt_lookup_wwn(const char *name)
2714 {
2715 	struct ib_device *dev;
2716 	struct srpt_device *sdev;
2717 	struct srpt_port *sport;
2718 	int i;
2719 
2720 	list_for_each_entry(sdev, &srpt_dev_list, list) {
2721 		dev = sdev->device;
2722 		if (!dev)
2723 			continue;
2724 
2725 		for (i = 0; i < dev->phys_port_cnt; i++) {
2726 			sport = &sdev->port[i];
2727 
2728 			if (strcmp(sport->port_guid, name) == 0)
2729 				return &sport->port_guid_wwn;
2730 			if (strcmp(sport->port_gid, name) == 0)
2731 				return &sport->port_gid_wwn;
2732 		}
2733 	}
2734 
2735 	return NULL;
2736 }
2737 
2738 static struct se_wwn *srpt_lookup_wwn(const char *name)
2739 {
2740 	struct se_wwn *wwn;
2741 
2742 	spin_lock(&srpt_dev_lock);
2743 	wwn = __srpt_lookup_wwn(name);
2744 	spin_unlock(&srpt_dev_lock);
2745 
2746 	return wwn;
2747 }
2748 
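/* Destroy the SRQ, if any, and free the receive ring associated with it. */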
2749 static void srpt_free_srq(struct srpt_device *sdev)
2750 {
2751 	if (!sdev->srq)
2752 		return;
2753 
2754 	ib_destroy_srq(sdev->srq);
2755 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
2756 			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
2757 	sdev->srq = NULL;
2758 }
2759 
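/*
 * Allocate a shared receive queue (SRQ) and a receive I/O context ring for
 * @sdev and post one receive request for every ring element.
 */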
2760 static int srpt_alloc_srq(struct srpt_device *sdev)
2761 {
2762 	struct ib_srq_init_attr srq_attr = {
2763 		.event_handler = srpt_srq_event,
2764 		.srq_context = (void *)sdev,
2765 		.attr.max_wr = sdev->srq_size,
2766 		.attr.max_sge = 1,
2767 		.srq_type = IB_SRQT_BASIC,
2768 	};
2769 	struct ib_device *device = sdev->device;
2770 	struct ib_srq *srq;
2771 	int i;
2772 
2773 	WARN_ON_ONCE(sdev->srq);
2774 	srq = ib_create_srq(sdev->pd, &srq_attr);
2775 	if (IS_ERR(srq)) {
2776 		pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
2777 		return PTR_ERR(srq);
2778 	}
2779 
2780 	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
2781 		 sdev->device->attrs.max_srq_wr, device->name);
2782 
2783 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
2784 		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
2785 				      sizeof(*sdev->ioctx_ring[0]),
2786 				      srp_max_req_size, DMA_FROM_DEVICE);
2787 	if (!sdev->ioctx_ring) {
2788 		ib_destroy_srq(srq);
2789 		return -ENOMEM;
2790 	}
2791 
2792 	sdev->use_srq = true;
2793 	sdev->srq = srq;
2794 
2795 	for (i = 0; i < sdev->srq_size; ++i) {
2796 		INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
2797 		srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
2798 	}
2799 
2800 	return 0;
2801 }
2802 
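/*
 * Switch @sdev between SRQ mode, in which a single receive queue is shared
 * by all channels of the HCA, and per-channel receive queue mode.
 */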
2803 static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
2804 {
2805 	struct ib_device *device = sdev->device;
2806 	int ret = 0;
2807 
2808 	if (!use_srq) {
2809 		srpt_free_srq(sdev);
2810 		sdev->use_srq = false;
2811 	} else if (use_srq && !sdev->srq) {
2812 		ret = srpt_alloc_srq(sdev);
2813 	}
2814 	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
2815 		 sdev->use_srq, ret);
2816 	return ret;
2817 }
2818 
2819 /**
2820  * srpt_add_one - InfiniBand device addition callback function
2821  * @device: Describes a HCA.
2822  */
2823 static void srpt_add_one(struct ib_device *device)
2824 {
2825 	struct srpt_device *sdev;
2826 	struct srpt_port *sport;
2827 	int i;
2828 
2829 	pr_debug("device = %p\n", device);
2830 
2831 	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
2832 	if (!sdev)
2833 		goto err;
2834 
2835 	sdev->device = device;
2836 	mutex_init(&sdev->sdev_mutex);
2837 
2838 	sdev->pd = ib_alloc_pd(device, 0);
2839 	if (IS_ERR(sdev->pd))
2840 		goto free_dev;
2841 
2842 	sdev->lkey = sdev->pd->local_dma_lkey;
2843 
2844 	sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
2845 
2846 	srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
2847 
2848 	if (!srpt_service_guid)
2849 		srpt_service_guid = be64_to_cpu(device->node_guid);
2850 
2851 	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
2852 	if (IS_ERR(sdev->cm_id))
2853 		goto err_ring;
2854 
2855 	/* print out target login information */
2856 	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
2857 		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
2858 		 srpt_service_guid, srpt_service_guid);
2859 
2860 	/*
	 * We do not have a consistent service_id (i.e. also id_ext of
	 * target_id) to identify this target. We currently use the guid of
	 * the first HCA in the system as service_id; therefore, the target_id
	 * will change if this HCA goes bad and is replaced by a different HCA.
2865 	 */
2866 	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
2867 		goto err_cm;
2868 
2869 	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
2870 			      srpt_event_handler);
2871 	ib_register_event_handler(&sdev->event_handler);
2872 
2873 	WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
2874 
2875 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
2876 		sport = &sdev->port[i - 1];
2877 		INIT_LIST_HEAD(&sport->nexus_list);
2878 		init_waitqueue_head(&sport->ch_releaseQ);
2879 		mutex_init(&sport->mutex);
2880 		sport->sdev = sdev;
2881 		sport->port = i;
2882 		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
2883 		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
2884 		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
2885 		sport->port_attrib.use_srq = false;
2886 		INIT_WORK(&sport->work, srpt_refresh_port_work);
2887 
2888 		if (srpt_refresh_port(sport)) {
2889 			pr_err("MAD registration failed for %s-%d.\n",
2890 			       sdev->device->name, i);
2891 			goto err_event;
2892 		}
2893 	}
2894 
2895 	spin_lock(&srpt_dev_lock);
2896 	list_add_tail(&sdev->list, &srpt_dev_list);
2897 	spin_unlock(&srpt_dev_lock);
2898 
2899 out:
2900 	ib_set_client_data(device, &srpt_client, sdev);
2901 	pr_debug("added %s.\n", device->name);
2902 	return;
2903 
2904 err_event:
2905 	ib_unregister_event_handler(&sdev->event_handler);
2906 err_cm:
2907 	ib_destroy_cm_id(sdev->cm_id);
2908 err_ring:
2909 	srpt_free_srq(sdev);
2910 	ib_dealloc_pd(sdev->pd);
2911 free_dev:
2912 	kfree(sdev);
2913 err:
2914 	sdev = NULL;
2915 	pr_info("%s(%s) failed.\n", __func__, device->name);
2916 	goto out;
2917 }
2918 
2919 /**
2920  * srpt_remove_one - InfiniBand device removal callback function
2921  * @device: Describes a HCA.
2922  * @client_data: The value passed as the third argument to ib_set_client_data().
2923  */
2924 static void srpt_remove_one(struct ib_device *device, void *client_data)
2925 {
2926 	struct srpt_device *sdev = client_data;
2927 	int i;
2928 
2929 	if (!sdev) {
2930 		pr_info("%s(%s): nothing to do.\n", __func__, device->name);
2931 		return;
2932 	}
2933 
2934 	srpt_unregister_mad_agent(sdev);
2935 
2936 	ib_unregister_event_handler(&sdev->event_handler);
2937 
2938 	/* Cancel any work queued by the just unregistered IB event handler. */
2939 	for (i = 0; i < sdev->device->phys_port_cnt; i++)
2940 		cancel_work_sync(&sdev->port[i].work);
2941 
2942 	ib_destroy_cm_id(sdev->cm_id);
2943 
2944 	/*
2945 	 * Unregistering a target must happen after destroying sdev->cm_id
2946 	 * such that no new SRP_LOGIN_REQ information units can arrive while
2947 	 * destroying the target.
2948 	 */
2949 	spin_lock(&srpt_dev_lock);
2950 	list_del(&sdev->list);
2951 	spin_unlock(&srpt_dev_lock);
2952 
2953 	for (i = 0; i < sdev->device->phys_port_cnt; i++)
2954 		srpt_release_sport(&sdev->port[i]);
2955 
2956 	srpt_free_srq(sdev);
2957 
2958 	ib_dealloc_pd(sdev->pd);
2959 
2960 	kfree(sdev);
2961 }
2962 
2963 static struct ib_client srpt_client = {
2964 	.name = DRV_NAME,
2965 	.add = srpt_add_one,
2966 	.remove = srpt_remove_one
2967 };
2968 
2969 static int srpt_check_true(struct se_portal_group *se_tpg)
2970 {
2971 	return 1;
2972 }
2973 
2974 static int srpt_check_false(struct se_portal_group *se_tpg)
2975 {
2976 	return 0;
2977 }
2978 
2979 static char *srpt_get_fabric_name(void)
2980 {
2981 	return "srpt";
2982 }
2983 
2984 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
2985 {
2986 	return tpg->se_tpg_wwn->priv;
2987 }
2988 
2989 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
2990 {
2991 	struct srpt_port *sport = srpt_tpg_to_sport(tpg);
2992 
2993 	WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
2994 		     tpg != &sport->port_gid_tpg);
2995 	return tpg == &sport->port_guid_tpg ? sport->port_guid :
2996 		sport->port_gid;
2997 }
2998 
2999 static u16 srpt_get_tag(struct se_portal_group *tpg)
3000 {
3001 	return 1;
3002 }
3003 
3004 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3005 {
3006 	return 1;
3007 }
3008 
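/*
 * Called by the TCM core when a command is released. Frees the RDMA R/W
 * contexts and returns the send I/O context to the channel's free list.
 */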
3009 static void srpt_release_cmd(struct se_cmd *se_cmd)
3010 {
3011 	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3012 				struct srpt_send_ioctx, cmd);
3013 	struct srpt_rdma_ch *ch = ioctx->ch;
3014 	unsigned long flags;
3015 
3016 	WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
3017 		     !(ioctx->cmd.transport_state & CMD_T_ABORTED));
3018 
3019 	if (ioctx->n_rw_ctx) {
3020 		srpt_free_rw_ctxs(ch, ioctx);
3021 		ioctx->n_rw_ctx = 0;
3022 	}
3023 
3024 	spin_lock_irqsave(&ch->spinlock, flags);
3025 	list_add(&ioctx->free_list, &ch->free_list);
3026 	spin_unlock_irqrestore(&ch->spinlock, flags);
3027 }
3028 
3029 /**
3030  * srpt_close_session - forcibly close a session
3031  * @se_sess: SCSI target session.
3032  *
3033  * Callback function invoked by the TCM core to clean up sessions associated
3034  * with a node ACL when the user invokes
3035  * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3036  */
3037 static void srpt_close_session(struct se_session *se_sess)
3038 {
3039 	struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
3040 
3041 	srpt_disconnect_ch_sync(ch);
3042 }
3043 
3044 /**
3045  * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
3046  * @se_sess: SCSI target session.
3047  *
3048  * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3049  * This object represents an arbitrary integer used to uniquely identify a
3050  * particular attached remote initiator port to a particular SCSI target port
3051  * within a particular SCSI target device within a particular SCSI instance.
3052  */
3053 static u32 srpt_sess_get_index(struct se_session *se_sess)
3054 {
3055 	return 0;
3056 }
3057 
3058 static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3059 {
3060 }
3061 
3062 /* Note: only used from inside debug printk's by the TCM core. */
3063 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3064 {
3065 	struct srpt_send_ioctx *ioctx;
3066 
3067 	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3068 	return ioctx->state;
3069 }
3070 
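/*
 * Parse a GUID formatted as four 16-bit hexadecimal words separated by
 * colons ("xxxx:xxxx:xxxx:xxxx").
 */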
3071 static int srpt_parse_guid(u64 *guid, const char *name)
3072 {
3073 	u16 w[4];
3074 	int ret = -EINVAL;
3075 
3076 	if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
3077 		goto out;
3078 	*guid = get_unaligned_be64(w);
3079 	ret = 0;
3080 out:
3081 	return ret;
3082 }
3083 
3084 /**
3085  * srpt_parse_i_port_id - parse an initiator port ID
3086  * @name: ASCII representation of a 128-bit initiator port ID.
3087  * @i_port_id: Binary 128-bit port ID.
3088  */
3089 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3090 {
3091 	const char *p;
3092 	unsigned len, count, leading_zero_bytes;
3093 	int ret;
3094 
3095 	p = name;
3096 	if (strncasecmp(p, "0x", 2) == 0)
3097 		p += 2;
3098 	ret = -EINVAL;
3099 	len = strlen(p);
3100 	if (len % 2)
3101 		goto out;
3102 	count = min(len / 2, 16U);
3103 	leading_zero_bytes = 16 - count;
3104 	memset(i_port_id, 0, leading_zero_bytes);
3105 	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
3106 	if (ret < 0)
3107 		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
3108 out:
3109 	return ret;
3110 }
3111 
3112 /*
3113  * configfs callback function invoked for
3114  * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3115  */
3116 static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
3117 {
3118 	u64 guid;
3119 	u8 i_port_id[16];
3120 	int ret;
3121 
3122 	ret = srpt_parse_guid(&guid, name);
3123 	if (ret < 0)
3124 		ret = srpt_parse_i_port_id(i_port_id, name);
3125 	if (ret < 0)
3126 		pr_err("invalid initiator port ID %s\n", name);
3127 	return ret;
3128 }
3129 
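/*
 * configfs attributes in the $tpg/attrib/ group: show and store callbacks
 * for the per-port attributes srp_max_rdma_size, srp_max_rsp_size,
 * srp_sq_size and use_srq.
 */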
3130 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
3131 		char *page)
3132 {
3133 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3134 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3135 
3136 	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3137 }
3138 
3139 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
3140 		const char *page, size_t count)
3141 {
3142 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3143 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3144 	unsigned long val;
3145 	int ret;
3146 
3147 	ret = kstrtoul(page, 0, &val);
3148 	if (ret < 0) {
3149 		pr_err("kstrtoul() failed with ret: %d\n", ret);
3150 		return -EINVAL;
3151 	}
3152 	if (val > MAX_SRPT_RDMA_SIZE) {
3153 		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3154 			MAX_SRPT_RDMA_SIZE);
3155 		return -EINVAL;
3156 	}
3157 	if (val < DEFAULT_MAX_RDMA_SIZE) {
3158 		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3159 			val, DEFAULT_MAX_RDMA_SIZE);
3160 		return -EINVAL;
3161 	}
3162 	sport->port_attrib.srp_max_rdma_size = val;
3163 
3164 	return count;
3165 }
3166 
3167 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
3168 		char *page)
3169 {
3170 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3171 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3172 
3173 	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3174 }
3175 
3176 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
3177 		const char *page, size_t count)
3178 {
3179 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3180 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3181 	unsigned long val;
3182 	int ret;
3183 
3184 	ret = kstrtoul(page, 0, &val);
3185 	if (ret < 0) {
3186 		pr_err("kstrtoul() failed with ret: %d\n", ret);
3187 		return -EINVAL;
3188 	}
3189 	if (val > MAX_SRPT_RSP_SIZE) {
3190 		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3191 			MAX_SRPT_RSP_SIZE);
3192 		return -EINVAL;
3193 	}
3194 	if (val < MIN_MAX_RSP_SIZE) {
3195 		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3196 			MIN_MAX_RSP_SIZE);
3197 		return -EINVAL;
3198 	}
3199 	sport->port_attrib.srp_max_rsp_size = val;
3200 
3201 	return count;
3202 }
3203 
3204 static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
3205 		char *page)
3206 {
3207 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3208 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3209 
3210 	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3211 }
3212 
3213 static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
3214 		const char *page, size_t count)
3215 {
3216 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3217 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3218 	unsigned long val;
3219 	int ret;
3220 
3221 	ret = kstrtoul(page, 0, &val);
3222 	if (ret < 0) {
3223 		pr_err("kstrtoul() failed with ret: %d\n", ret);
3224 		return -EINVAL;
3225 	}
3226 	if (val > MAX_SRPT_SRQ_SIZE) {
3227 		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3228 			MAX_SRPT_SRQ_SIZE);
3229 		return -EINVAL;
3230 	}
3231 	if (val < MIN_SRPT_SRQ_SIZE) {
3232 		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3233 			MIN_SRPT_SRQ_SIZE);
3234 		return -EINVAL;
3235 	}
3236 	sport->port_attrib.srp_sq_size = val;
3237 
3238 	return count;
3239 }
3240 
3241 static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
3242 					    char *page)
3243 {
3244 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3245 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3246 
3247 	return sprintf(page, "%d\n", sport->port_attrib.use_srq);
3248 }
3249 
3250 static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
3251 					     const char *page, size_t count)
3252 {
3253 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3254 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3255 	struct srpt_device *sdev = sport->sdev;
3256 	unsigned long val;
3257 	bool enabled;
3258 	int ret;
3259 
3260 	ret = kstrtoul(page, 0, &val);
3261 	if (ret < 0)
3262 		return ret;
3263 	if (val != !!val)
3264 		return -EINVAL;
3265 
3266 	ret = mutex_lock_interruptible(&sdev->sdev_mutex);
3267 	if (ret < 0)
3268 		return ret;
3269 	ret = mutex_lock_interruptible(&sport->mutex);
3270 	if (ret < 0)
3271 		goto unlock_sdev;
3272 	enabled = sport->enabled;
3273 	/* Log out all initiator systems before changing 'use_srq'. */
3274 	srpt_set_enabled(sport, false);
3275 	sport->port_attrib.use_srq = val;
3276 	srpt_use_srq(sdev, sport->port_attrib.use_srq);
3277 	srpt_set_enabled(sport, enabled);
3278 	ret = count;
3279 	mutex_unlock(&sport->mutex);
3280 unlock_sdev:
3281 	mutex_unlock(&sdev->sdev_mutex);
3282 
3283 	return ret;
3284 }
3285 
3286 CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_max_rdma_size);
3287 CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_max_rsp_size);
3288 CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_sq_size);
3289 CONFIGFS_ATTR(srpt_tpg_attrib_,  use_srq);
3290 
3291 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3292 	&srpt_tpg_attrib_attr_srp_max_rdma_size,
3293 	&srpt_tpg_attrib_attr_srp_max_rsp_size,
3294 	&srpt_tpg_attrib_attr_srp_sq_size,
3295 	&srpt_tpg_attrib_attr_use_srq,
3296 	NULL,
3297 };
3298 
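/*
 * The $tpg/enable configfs attribute: writing 1 enables and writing 0
 * disables target login on a port (see also srpt_set_enabled()).
 */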
3299 static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
3300 {
3301 	struct se_portal_group *se_tpg = to_tpg(item);
3302 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3303 
	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
3305 }
3306 
3307 static ssize_t srpt_tpg_enable_store(struct config_item *item,
3308 		const char *page, size_t count)
3309 {
3310 	struct se_portal_group *se_tpg = to_tpg(item);
3311 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3312 	unsigned long tmp;
	int ret;
3314 
3315 	ret = kstrtoul(page, 0, &tmp);
3316 	if (ret < 0) {
3317 		pr_err("Unable to extract srpt_tpg_store_enable\n");
3318 		return -EINVAL;
3319 	}
3320 
3321 	if ((tmp != 0) && (tmp != 1)) {
3322 		pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3323 		return -EINVAL;
3324 	}
3325 
3326 	mutex_lock(&sport->mutex);
3327 	srpt_set_enabled(sport, tmp);
3328 	mutex_unlock(&sport->mutex);
3329 
3330 	return count;
3331 }
3332 
3333 CONFIGFS_ATTR(srpt_tpg_, enable);
3334 
3335 static struct configfs_attribute *srpt_tpg_attrs[] = {
3336 	&srpt_tpg_attr_enable,
3337 	NULL,
3338 };
3339 
3340 /**
3341  * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
3342  * @wwn: Corresponds to $driver/$port.
3343  * @group: Not used.
3344  * @name: $tpg.
3345  */
3346 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3347 					     struct config_group *group,
3348 					     const char *name)
3349 {
3350 	struct srpt_port *sport = wwn->priv;
	struct se_portal_group *tpg;
3352 	int res;
3353 
3354 	WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
3355 		     wwn != &sport->port_gid_wwn);
3356 	tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
3357 		&sport->port_gid_tpg;
3358 	res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
3359 	if (res)
3360 		return ERR_PTR(res);
3361 
3362 	return tpg;
3363 }
3364 
3365 /**
3366  * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
3367  * @tpg: Target portal group to deregister.
3368  */
3369 static void srpt_drop_tpg(struct se_portal_group *tpg)
3370 {
3371 	struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3372 
3373 	sport->enabled = false;
3374 	core_tpg_deregister(tpg);
3375 }
3376 
3377 /**
3378  * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
3379  * @tf: Not used.
3380  * @group: Not used.
3381  * @name: $port.
3382  */
3383 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3384 				      struct config_group *group,
3385 				      const char *name)
3386 {
3387 	return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
3388 }
3389 
3390 /**
3391  * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
3392  * @wwn: $port.
3393  */
3394 static void srpt_drop_tport(struct se_wwn *wwn)
3395 {
3396 }
3397 
3398 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
3399 {
3400 	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3401 }
3402 
3403 CONFIGFS_ATTR_RO(srpt_wwn_, version);
3404 
3405 static struct configfs_attribute *srpt_wwn_attrs[] = {
3406 	&srpt_wwn_attr_version,
3407 	NULL,
3408 };
3409 
3410 static const struct target_core_fabric_ops srpt_template = {
3411 	.module				= THIS_MODULE,
3412 	.name				= "srpt",
3413 	.get_fabric_name		= srpt_get_fabric_name,
3414 	.tpg_get_wwn			= srpt_get_fabric_wwn,
3415 	.tpg_get_tag			= srpt_get_tag,
3416 	.tpg_check_demo_mode		= srpt_check_false,
3417 	.tpg_check_demo_mode_cache	= srpt_check_true,
3418 	.tpg_check_demo_mode_write_protect = srpt_check_true,
3419 	.tpg_check_prod_mode_write_protect = srpt_check_false,
3420 	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
3421 	.release_cmd			= srpt_release_cmd,
3422 	.check_stop_free		= srpt_check_stop_free,
3423 	.close_session			= srpt_close_session,
3424 	.sess_get_index			= srpt_sess_get_index,
3425 	.sess_get_initiator_sid		= NULL,
3426 	.write_pending			= srpt_write_pending,
3427 	.write_pending_status		= srpt_write_pending_status,
3428 	.set_default_node_attributes	= srpt_set_default_node_attrs,
3429 	.get_cmd_state			= srpt_get_tcm_cmd_state,
3430 	.queue_data_in			= srpt_queue_data_in,
3431 	.queue_status			= srpt_queue_status,
3432 	.queue_tm_rsp			= srpt_queue_tm_rsp,
3433 	.aborted_task			= srpt_aborted_task,
3434 	/*
3435 	 * Setup function pointers for generic logic in
3436 	 * target_core_fabric_configfs.c
3437 	 */
3438 	.fabric_make_wwn		= srpt_make_tport,
3439 	.fabric_drop_wwn		= srpt_drop_tport,
3440 	.fabric_make_tpg		= srpt_make_tpg,
3441 	.fabric_drop_tpg		= srpt_drop_tpg,
3442 	.fabric_init_nodeacl		= srpt_init_nodeacl,
3443 
3444 	.tfc_wwn_attrs			= srpt_wwn_attrs,
3445 	.tfc_tpg_base_attrs		= srpt_tpg_attrs,
3446 	.tfc_tpg_attrib_attrs		= srpt_tpg_attrib_attrs,
3447 };
3448 
3449 /**
3450  * srpt_init_module - kernel module initialization
3451  *
3452  * Note: Since ib_register_client() registers callback functions, and since at
3453  * least one of these callback functions (srpt_add_one()) calls target core
3454  * functions, this driver must be registered with the target core before
3455  * ib_register_client() is called.
3456  */
3457 static int __init srpt_init_module(void)
3458 {
3459 	int ret;
3460 
3461 	ret = -EINVAL;
3462 	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
		       srp_max_req_size, MIN_MAX_REQ_SIZE);
3466 		goto out;
3467 	}
3468 
	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE ||
	    srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3474 		goto out;
3475 	}
3476 
3477 	ret = target_register_template(&srpt_template);
3478 	if (ret)
3479 		goto out;
3480 
3481 	ret = ib_register_client(&srpt_client);
3482 	if (ret) {
3483 		pr_err("couldn't register IB client\n");
3484 		goto out_unregister_target;
3485 	}
3486 
3487 	return 0;
3488 
3489 out_unregister_target:
3490 	target_unregister_template(&srpt_template);
3491 out:
3492 	return ret;
3493 }
3494 
3495 static void __exit srpt_cleanup_module(void)
3496 {
3497 	ib_unregister_client(&srpt_client);
3498 	target_unregister_template(&srpt_template);
3499 }
3500 
3501 module_init(srpt_init_module);
3502 module_exit(srpt_cleanup_module);
3503