1 /*
2  * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
3  * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  */
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/ctype.h>
40 #include <linux/kthread.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/atomic.h>
44 #include <scsi/scsi_tcq.h>
45 #include <target/configfs_macros.h>
46 #include <target/target_core_base.h>
47 #include <target/target_core_fabric_configfs.h>
48 #include <target/target_core_fabric.h>
49 #include <target/target_core_configfs.h>
50 #include "ib_srpt.h"
51 
52 /* Name of this kernel module. */
53 #define DRV_NAME		"ib_srpt"
54 #define DRV_VERSION		"2.0.0"
55 #define DRV_RELDATE		"2011-02-14"
56 
57 #define SRPT_ID_STRING	"Linux SRP target"
58 
59 #undef pr_fmt
60 #define pr_fmt(fmt) DRV_NAME " " fmt
61 
62 MODULE_AUTHOR("Vu Pham and Bart Van Assche");
63 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
64 		   "v" DRV_VERSION " (" DRV_RELDATE ")");
65 MODULE_LICENSE("Dual BSD/GPL");
66 
67 /*
68  * Global Variables
69  */
70 
71 static u64 srpt_service_guid;
72 static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
73 static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
74 
75 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
76 module_param(srp_max_req_size, int, 0444);
77 MODULE_PARM_DESC(srp_max_req_size,
78 		 "Maximum size of SRP request messages in bytes.");
79 
80 static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
81 module_param(srpt_srq_size, int, 0444);
82 MODULE_PARM_DESC(srpt_srq_size,
83 		 "Shared receive queue (SRQ) size.");
84 
85 static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
86 {
87 	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
88 }
89 module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
90 		  0444);
91 MODULE_PARM_DESC(srpt_service_guid,
92 		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
93 		 " instead of using the node_guid of the first HCA.");
94 
95 static struct ib_client srpt_client;
96 static struct target_fabric_configfs *srpt_target;
97 static void srpt_release_channel(struct srpt_rdma_ch *ch);
98 static int srpt_queue_status(struct se_cmd *cmd);
99 
100 /**
101  * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
102  */
103 static inline
104 enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
105 {
106 	switch (dir) {
107 	case DMA_TO_DEVICE:	return DMA_FROM_DEVICE;
108 	case DMA_FROM_DEVICE:	return DMA_TO_DEVICE;
109 	default:		return dir;
110 	}
111 }
112 
113 /**
114  * srpt_sdev_name() - Return the name associated with the HCA.
115  *
116  * Examples are ib0, ib1, ...
117  */
118 static inline const char *srpt_sdev_name(struct srpt_device *sdev)
119 {
120 	return sdev->device->name;
121 }
122 
123 static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
124 {
125 	unsigned long flags;
126 	enum rdma_ch_state state;
127 
128 	spin_lock_irqsave(&ch->spinlock, flags);
129 	state = ch->state;
130 	spin_unlock_irqrestore(&ch->spinlock, flags);
131 	return state;
132 }
133 
134 static enum rdma_ch_state
135 srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
136 {
137 	unsigned long flags;
138 	enum rdma_ch_state prev;
139 
140 	spin_lock_irqsave(&ch->spinlock, flags);
141 	prev = ch->state;
142 	ch->state = new_state;
143 	spin_unlock_irqrestore(&ch->spinlock, flags);
144 	return prev;
145 }
146 
147 /**
148  * srpt_test_and_set_ch_state() - Test and set the channel state.
149  *
150  * Returns true if and only if the channel state has been set to the new state.
151  */
152 static bool
153 srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
154 			   enum rdma_ch_state new)
155 {
156 	unsigned long flags;
157 	enum rdma_ch_state prev;
158 
159 	spin_lock_irqsave(&ch->spinlock, flags);
160 	prev = ch->state;
161 	if (prev == old)
162 		ch->state = new;
163 	spin_unlock_irqrestore(&ch->spinlock, flags);
164 	return prev == old;
165 }
166 
167 /**
168  * srpt_event_handler() - Asynchronous IB event callback function.
169  *
170  * Callback function called by the InfiniBand core when an asynchronous IB
171  * event occurs. This callback may occur in interrupt context. See also
172  * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
173  * Architecture Specification.
174  */
175 static void srpt_event_handler(struct ib_event_handler *handler,
176 			       struct ib_event *event)
177 {
178 	struct srpt_device *sdev;
179 	struct srpt_port *sport;
180 
181 	sdev = ib_get_client_data(event->device, &srpt_client);
182 	if (!sdev || sdev->device != event->device)
183 		return;
184 
185 	pr_debug("ASYNC event= %d on device= %s\n", event->event,
186 		 srpt_sdev_name(sdev));
187 
188 	switch (event->event) {
189 	case IB_EVENT_PORT_ERR:
190 		if (event->element.port_num <= sdev->device->phys_port_cnt) {
191 			sport = &sdev->port[event->element.port_num - 1];
192 			sport->lid = 0;
193 			sport->sm_lid = 0;
194 		}
195 		break;
196 	case IB_EVENT_PORT_ACTIVE:
197 	case IB_EVENT_LID_CHANGE:
198 	case IB_EVENT_PKEY_CHANGE:
199 	case IB_EVENT_SM_CHANGE:
200 	case IB_EVENT_CLIENT_REREGISTER:
201 		/* Refresh port data asynchronously. */
202 		if (event->element.port_num <= sdev->device->phys_port_cnt) {
203 			sport = &sdev->port[event->element.port_num - 1];
204 			if (!sport->lid && !sport->sm_lid)
205 				schedule_work(&sport->work);
206 		}
207 		break;
208 	default:
209 		printk(KERN_ERR "received unrecognized IB event %d\n",
210 		       event->event);
211 		break;
212 	}
213 }
214 
215 /**
216  * srpt_srq_event() - SRQ event callback function.
217  */
218 static void srpt_srq_event(struct ib_event *event, void *ctx)
219 {
220 	printk(KERN_INFO "SRQ event %d\n", event->event);
221 }
222 
223 /**
224  * srpt_qp_event() - QP event callback function.
225  */
226 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
227 {
228 	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
229 		 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
230 
231 	switch (event->event) {
232 	case IB_EVENT_COMM_EST:
233 		ib_cm_notify(ch->cm_id, event->event);
234 		break;
235 	case IB_EVENT_QP_LAST_WQE_REACHED:
236 		if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
237 					       CH_RELEASING))
238 			srpt_release_channel(ch);
239 		else
240 			pr_debug("%s: state %d - ignored LAST_WQE.\n",
241 				 ch->sess_name, srpt_get_ch_state(ch));
242 		break;
243 	default:
244 		printk(KERN_ERR "received unrecognized IB QP event %d\n",
245 		       event->event);
246 		break;
247 	}
248 }
249 
250 /**
251  * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
252  *
253  * @slot: one-based slot number.
254  * @value: four-bit value.
255  *
256  * Copies the lowest four bits of value in element slot of the array of four
257  * bit elements called c_list (controller list). The index slot is one-based.
258  */
259 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
260 {
261 	u16 id;
262 	u8 tmp;
263 
264 	id = (slot - 1) / 2;
265 	if (slot & 0x1) {
266 		tmp = c_list[id] & 0xf;
267 		c_list[id] = (value << 4) | tmp;
268 	} else {
269 		tmp = c_list[id] & 0xf0;
270 		c_list[id] = (value & 0xf) | tmp;
271 	}
272 }
273 
274 /**
275  * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
276  *
277  * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
278  * Specification.
279  */
280 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
281 {
282 	struct ib_class_port_info *cif;
283 
284 	cif = (struct ib_class_port_info *)mad->data;
285 	memset(cif, 0, sizeof *cif);
286 	cif->base_version = 1;
287 	cif->class_version = 1;
288 	cif->resp_time_value = 20;
289 
290 	mad->mad_hdr.status = 0;
291 }
292 
293 /**
294  * srpt_get_iou() - Write IOUnitInfo to a management datagram.
295  *
296  * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
297  * Specification. See also section B.7, table B.6 in the SRP r16a document.
298  */
299 static void srpt_get_iou(struct ib_dm_mad *mad)
300 {
301 	struct ib_dm_iou_info *ioui;
302 	u8 slot;
303 	int i;
304 
305 	ioui = (struct ib_dm_iou_info *)mad->data;
306 	ioui->change_id = __constant_cpu_to_be16(1);
307 	ioui->max_controllers = 16;
308 
309 	/* set present for slot 1 and empty for the rest */
310 	srpt_set_ioc(ioui->controller_list, 1, 1);
311 	for (i = 1, slot = 2; i < 16; i++, slot++)
312 		srpt_set_ioc(ioui->controller_list, slot, 0);
313 
314 	mad->mad_hdr.status = 0;
315 }
316 
317 /**
318  * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
319  *
320  * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
321  * Architecture Specification. See also section B.7, table B.7 in the SRP
322  * r16a document.
323  */
324 static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
325 			 struct ib_dm_mad *mad)
326 {
327 	struct srpt_device *sdev = sport->sdev;
328 	struct ib_dm_ioc_profile *iocp;
329 
330 	iocp = (struct ib_dm_ioc_profile *)mad->data;
331 
332 	if (!slot || slot > 16) {
333 		mad->mad_hdr.status
334 			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
335 		return;
336 	}
337 
338 	if (slot > 2) {
339 		mad->mad_hdr.status
340 			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
341 		return;
342 	}
343 
344 	memset(iocp, 0, sizeof *iocp);
345 	strcpy(iocp->id_string, SRPT_ID_STRING);
346 	iocp->guid = cpu_to_be64(srpt_service_guid);
347 	iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
348 	iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
349 	iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
350 	iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
351 	iocp->subsys_device_id = 0x0;
352 	iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
353 	iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
354 	iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
355 	iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
356 	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
357 	iocp->rdma_read_depth = 4;
358 	iocp->send_size = cpu_to_be32(srp_max_req_size);
359 	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
360 					  1U << 24));
361 	iocp->num_svc_entries = 1;
362 	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
363 		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
364 
365 	mad->mad_hdr.status = 0;
366 }
367 
368 /**
369  * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
370  *
371  * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
372  * Specification. See also section B.7, table B.8 in the SRP r16a document.
373  */
374 static void srpt_get_svc_entries(u64 ioc_guid,
375 				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
376 {
377 	struct ib_dm_svc_entries *svc_entries;
378 
379 	WARN_ON(!ioc_guid);
380 
381 	if (!slot || slot > 16) {
382 		mad->mad_hdr.status
383 			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
384 		return;
385 	}
386 
387 	if (slot > 2 || lo > hi || hi > 1) {
388 		mad->mad_hdr.status
389 			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
390 		return;
391 	}
392 
393 	svc_entries = (struct ib_dm_svc_entries *)mad->data;
394 	memset(svc_entries, 0, sizeof *svc_entries);
395 	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
396 	snprintf(svc_entries->service_entries[0].name,
397 		 sizeof(svc_entries->service_entries[0].name),
398 		 "%s%016llx",
399 		 SRP_SERVICE_NAME_PREFIX,
400 		 ioc_guid);
401 
402 	mad->mad_hdr.status = 0;
403 }
404 
405 /**
406  * srpt_mgmt_method_get() - Process a received management datagram.
407  * @sp:      source port through which the MAD has been received.
408  * @rq_mad:  received MAD.
409  * @rsp_mad: response MAD.
410  */
411 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
412 				 struct ib_dm_mad *rsp_mad)
413 {
414 	u16 attr_id;
415 	u32 slot;
416 	u8 hi, lo;
417 
418 	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
419 	switch (attr_id) {
420 	case DM_ATTR_CLASS_PORT_INFO:
421 		srpt_get_class_port_info(rsp_mad);
422 		break;
423 	case DM_ATTR_IOU_INFO:
424 		srpt_get_iou(rsp_mad);
425 		break;
426 	case DM_ATTR_IOC_PROFILE:
427 		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
428 		srpt_get_ioc(sp, slot, rsp_mad);
429 		break;
430 	case DM_ATTR_SVC_ENTRIES:
431 		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
432 		hi = (u8) ((slot >> 8) & 0xff);
433 		lo = (u8) (slot & 0xff);
434 		slot = (u16) ((slot >> 16) & 0xffff);
435 		srpt_get_svc_entries(srpt_service_guid,
436 				     slot, hi, lo, rsp_mad);
437 		break;
438 	default:
439 		rsp_mad->mad_hdr.status =
440 		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
441 		break;
442 	}
443 }
444 
445 /**
446  * srpt_mad_send_handler() - Post MAD-send callback function.
447  */
448 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
449 				  struct ib_mad_send_wc *mad_wc)
450 {
451 	ib_destroy_ah(mad_wc->send_buf->ah);
452 	ib_free_send_mad(mad_wc->send_buf);
453 }
454 
455 /**
456  * srpt_mad_recv_handler() - MAD reception callback function.
457  */
458 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
459 				  struct ib_mad_recv_wc *mad_wc)
460 {
461 	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
462 	struct ib_ah *ah;
463 	struct ib_mad_send_buf *rsp;
464 	struct ib_dm_mad *dm_mad;
465 
466 	if (!mad_wc || !mad_wc->recv_buf.mad)
467 		return;
468 
469 	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
470 				  mad_wc->recv_buf.grh, mad_agent->port_num);
471 	if (IS_ERR(ah))
472 		goto err;
473 
474 	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
475 
476 	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
477 				 mad_wc->wc->pkey_index, 0,
478 				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
479 				 GFP_KERNEL);
480 	if (IS_ERR(rsp))
481 		goto err_rsp;
482 
483 	rsp->ah = ah;
484 
485 	dm_mad = rsp->mad;
486 	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
487 	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
488 	dm_mad->mad_hdr.status = 0;
489 
490 	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
491 	case IB_MGMT_METHOD_GET:
492 		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
493 		break;
494 	case IB_MGMT_METHOD_SET:
495 		dm_mad->mad_hdr.status =
496 		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
497 		break;
498 	default:
499 		dm_mad->mad_hdr.status =
500 		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
501 		break;
502 	}
503 
504 	if (!ib_post_send_mad(rsp, NULL)) {
505 		ib_free_recv_mad(mad_wc);
506 		/* will destroy_ah & free_send_mad in send completion */
507 		return;
508 	}
509 
510 	ib_free_send_mad(rsp);
511 
512 err_rsp:
513 	ib_destroy_ah(ah);
514 err:
515 	ib_free_recv_mad(mad_wc);
516 }
517 
518 /**
519  * srpt_refresh_port() - Configure a HCA port.
520  *
521  * Enable InfiniBand management datagram processing, update the cached sm_lid,
522  * lid and gid values, and register a callback function for processing MADs
523  * on the specified port.
524  *
525  * Note: It is safe to call this function more than once for the same port.
526  */
527 static int srpt_refresh_port(struct srpt_port *sport)
528 {
529 	struct ib_mad_reg_req reg_req;
530 	struct ib_port_modify port_modify;
531 	struct ib_port_attr port_attr;
532 	int ret;
533 
534 	memset(&port_modify, 0, sizeof port_modify);
535 	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
536 	port_modify.clr_port_cap_mask = 0;
537 
538 	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
539 	if (ret)
540 		goto err_mod_port;
541 
542 	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
543 	if (ret)
544 		goto err_query_port;
545 
546 	sport->sm_lid = port_attr.sm_lid;
547 	sport->lid = port_attr.lid;
548 
549 	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
550 	if (ret)
551 		goto err_query_port;
552 
553 	if (!sport->mad_agent) {
554 		memset(&reg_req, 0, sizeof reg_req);
555 		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
556 		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
557 		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
558 		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
559 
560 		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
561 							 sport->port,
562 							 IB_QPT_GSI,
563 							 &reg_req, 0,
564 							 srpt_mad_send_handler,
565 							 srpt_mad_recv_handler,
566 							 sport);
567 		if (IS_ERR(sport->mad_agent)) {
568 			ret = PTR_ERR(sport->mad_agent);
569 			sport->mad_agent = NULL;
570 			goto err_query_port;
571 		}
572 	}
573 
574 	return 0;
575 
576 err_query_port:
577 
578 	port_modify.set_port_cap_mask = 0;
579 	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
580 	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
581 
582 err_mod_port:
583 
584 	return ret;
585 }
586 
587 /**
588  * srpt_unregister_mad_agent() - Unregister MAD callback functions.
589  *
590  * Note: It is safe to call this function more than once for the same device.
591  */
592 static void srpt_unregister_mad_agent(struct srpt_device *sdev)
593 {
594 	struct ib_port_modify port_modify = {
595 		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
596 	};
597 	struct srpt_port *sport;
598 	int i;
599 
600 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
601 		sport = &sdev->port[i - 1];
602 		WARN_ON(sport->port != i);
603 		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
604 			printk(KERN_ERR "disabling MAD processing failed.\n");
605 		if (sport->mad_agent) {
606 			ib_unregister_mad_agent(sport->mad_agent);
607 			sport->mad_agent = NULL;
608 		}
609 	}
610 }
611 
612 /**
613  * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
614  */
615 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
616 					   int ioctx_size, int dma_size,
617 					   enum dma_data_direction dir)
618 {
619 	struct srpt_ioctx *ioctx;
620 
621 	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
622 	if (!ioctx)
623 		goto err;
624 
625 	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
626 	if (!ioctx->buf)
627 		goto err_free_ioctx;
628 
629 	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
630 	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
631 		goto err_free_buf;
632 
633 	return ioctx;
634 
635 err_free_buf:
636 	kfree(ioctx->buf);
637 err_free_ioctx:
638 	kfree(ioctx);
639 err:
640 	return NULL;
641 }
642 
643 /**
644  * srpt_free_ioctx() - Free an SRPT I/O context structure.
645  */
646 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
647 			    int dma_size, enum dma_data_direction dir)
648 {
649 	if (!ioctx)
650 		return;
651 
652 	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
653 	kfree(ioctx->buf);
654 	kfree(ioctx);
655 }
656 
657 /**
658  * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
659  * @sdev:       Device to allocate the I/O context ring for.
660  * @ring_size:  Number of elements in the I/O context ring.
661  * @ioctx_size: I/O context size.
662  * @dma_size:   DMA buffer size.
663  * @dir:        DMA data direction.
664  */
665 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
666 				int ring_size, int ioctx_size,
667 				int dma_size, enum dma_data_direction dir)
668 {
669 	struct srpt_ioctx **ring;
670 	int i;
671 
672 	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
673 		&& ioctx_size != sizeof(struct srpt_send_ioctx));
674 
675 	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
676 	if (!ring)
677 		goto out;
678 	for (i = 0; i < ring_size; ++i) {
679 		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
680 		if (!ring[i])
681 			goto err;
682 		ring[i]->index = i;
683 	}
684 	goto out;
685 
686 err:
687 	while (--i >= 0)
688 		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
689 	kfree(ring);
690 	ring = NULL;
691 out:
692 	return ring;
693 }
694 
695 /**
696  * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
697  */
698 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
699 				 struct srpt_device *sdev, int ring_size,
700 				 int dma_size, enum dma_data_direction dir)
701 {
702 	int i;
703 
704 	for (i = 0; i < ring_size; ++i)
705 		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
706 	kfree(ioctx_ring);
707 }
708 
709 /**
710  * srpt_get_cmd_state() - Get the state of a SCSI command.
711  */
712 static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
713 {
714 	enum srpt_command_state state;
715 	unsigned long flags;
716 
717 	BUG_ON(!ioctx);
718 
719 	spin_lock_irqsave(&ioctx->spinlock, flags);
720 	state = ioctx->state;
721 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
722 	return state;
723 }
724 
725 /**
726  * srpt_set_cmd_state() - Set the state of a SCSI command.
727  *
728  * Does not modify the state of aborted commands. Returns the previous command
729  * state.
730  */
731 static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
732 						  enum srpt_command_state new)
733 {
734 	enum srpt_command_state previous;
735 	unsigned long flags;
736 
737 	BUG_ON(!ioctx);
738 
739 	spin_lock_irqsave(&ioctx->spinlock, flags);
740 	previous = ioctx->state;
741 	if (previous != SRPT_STATE_DONE)
742 		ioctx->state = new;
743 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
744 
745 	return previous;
746 }
747 
748 /**
749  * srpt_test_and_set_cmd_state() - Test and set the state of a command.
750  *
751  * Returns true if and only if the previous command state was equal to 'old'.
752  */
753 static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
754 					enum srpt_command_state old,
755 					enum srpt_command_state new)
756 {
757 	enum srpt_command_state previous;
758 	unsigned long flags;
759 
760 	WARN_ON(!ioctx);
761 	WARN_ON(old == SRPT_STATE_DONE);
762 	WARN_ON(new == SRPT_STATE_NEW);
763 
764 	spin_lock_irqsave(&ioctx->spinlock, flags);
765 	previous = ioctx->state;
766 	if (previous == old)
767 		ioctx->state = new;
768 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
769 	return previous == old;
770 }
771 
772 /**
773  * srpt_post_recv() - Post an IB receive request.
774  */
775 static int srpt_post_recv(struct srpt_device *sdev,
776 			  struct srpt_recv_ioctx *ioctx)
777 {
778 	struct ib_sge list;
779 	struct ib_recv_wr wr, *bad_wr;
780 
781 	BUG_ON(!sdev);
782 	wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
783 
784 	list.addr = ioctx->ioctx.dma;
785 	list.length = srp_max_req_size;
786 	list.lkey = sdev->mr->lkey;
787 
788 	wr.next = NULL;
789 	wr.sg_list = &list;
790 	wr.num_sge = 1;
791 
792 	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
793 }
794 
795 /**
796  * srpt_post_send() - Post an IB send request.
797  *
798  * Returns zero upon success and a non-zero value upon failure.
799  */
800 static int srpt_post_send(struct srpt_rdma_ch *ch,
801 			  struct srpt_send_ioctx *ioctx, int len)
802 {
803 	struct ib_sge list;
804 	struct ib_send_wr wr, *bad_wr;
805 	struct srpt_device *sdev = ch->sport->sdev;
806 	int ret;
807 
808 	atomic_inc(&ch->req_lim);
809 
810 	ret = -ENOMEM;
811 	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
812 		printk(KERN_WARNING "IB send queue full (needed 1)\n");
813 		goto out;
814 	}
815 
816 	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
817 				      DMA_TO_DEVICE);
818 
819 	list.addr = ioctx->ioctx.dma;
820 	list.length = len;
821 	list.lkey = sdev->mr->lkey;
822 
823 	wr.next = NULL;
824 	wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
825 	wr.sg_list = &list;
826 	wr.num_sge = 1;
827 	wr.opcode = IB_WR_SEND;
828 	wr.send_flags = IB_SEND_SIGNALED;
829 
830 	ret = ib_post_send(ch->qp, &wr, &bad_wr);
831 
832 out:
833 	if (ret < 0) {
834 		atomic_inc(&ch->sq_wr_avail);
835 		atomic_dec(&ch->req_lim);
836 	}
837 	return ret;
838 }
839 
840 /**
841  * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
842  * @ioctx: Pointer to the I/O context associated with the request.
843  * @srp_cmd: Pointer to the SRP_CMD request data.
844  * @dir: Pointer to the variable to which the transfer direction will be
845  *   written.
846  * @data_len: Pointer to the variable to which the total data length of all
847  *   descriptors in the SRP_CMD request will be written.
848  *
849  * This function initializes ioctx->nrbuf and ioctx->r_bufs.
850  *
851  * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
852  * -ENOMEM when memory allocation fails and zero upon success.
853  */
854 static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
855 			     struct srp_cmd *srp_cmd,
856 			     enum dma_data_direction *dir, u64 *data_len)
857 {
858 	struct srp_indirect_buf *idb;
859 	struct srp_direct_buf *db;
860 	unsigned add_cdb_offset;
861 	int ret;
862 
863 	/*
864 	 * The pointer computations below will only be compiled correctly
865 	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
866 	 * whether srp_cmd::add_data has been declared as a byte pointer.
867 	 */
868 	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
869 		     && !__same_type(srp_cmd->add_data[0], (u8)0));
870 
871 	BUG_ON(!dir);
872 	BUG_ON(!data_len);
873 
874 	ret = 0;
875 	*data_len = 0;
876 
877 	/*
878 	 * The lower four bits of the buffer format field contain the DATA-IN
879 	 * buffer descriptor format, and the highest four bits contain the
880 	 * DATA-OUT buffer descriptor format.
881 	 */
882 	*dir = DMA_NONE;
883 	if (srp_cmd->buf_fmt & 0xf)
884 		/* DATA-IN: transfer data from target to initiator (read). */
885 		*dir = DMA_FROM_DEVICE;
886 	else if (srp_cmd->buf_fmt >> 4)
887 		/* DATA-OUT: transfer data from initiator to target (write). */
888 		*dir = DMA_TO_DEVICE;
889 
890 	/*
891 	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
892 	 * CDB LENGTH' field are reserved and the size in bytes of this field
893 	 * is four times the value specified in bits 3..7. Hence the "& ~3".
894 	 */
895 	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
896 	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
897 	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
898 		ioctx->n_rbuf = 1;
899 		ioctx->rbufs = &ioctx->single_rbuf;
900 
901 		db = (struct srp_direct_buf *)(srp_cmd->add_data
902 					       + add_cdb_offset);
903 		memcpy(ioctx->rbufs, db, sizeof *db);
904 		*data_len = be32_to_cpu(db->len);
905 	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
906 		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
907 		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
908 						  + add_cdb_offset);
909 
910 		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
911 
912 		if (ioctx->n_rbuf >
913 		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
914 			printk(KERN_ERR "received unsupported SRP_CMD request"
915 			       " type (%u out + %u in != %u / %zu)\n",
916 			       srp_cmd->data_out_desc_cnt,
917 			       srp_cmd->data_in_desc_cnt,
918 			       be32_to_cpu(idb->table_desc.len),
919 			       sizeof(*db));
920 			ioctx->n_rbuf = 0;
921 			ret = -EINVAL;
922 			goto out;
923 		}
924 
925 		if (ioctx->n_rbuf == 1)
926 			ioctx->rbufs = &ioctx->single_rbuf;
927 		else {
928 			ioctx->rbufs =
929 				kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
930 			if (!ioctx->rbufs) {
931 				ioctx->n_rbuf = 0;
932 				ret = -ENOMEM;
933 				goto out;
934 			}
935 		}
936 
937 		db = idb->desc_list;
938 		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
939 		*data_len = be32_to_cpu(idb->len);
940 	}
941 out:
942 	return ret;
943 }
944 
945 /**
946  * srpt_init_ch_qp() - Initialize queue pair attributes.
947  *
948  * Initialized the attributes of queue pair 'qp' by allowing local write,
949  * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
950  */
951 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
952 {
953 	struct ib_qp_attr *attr;
954 	int ret;
955 
956 	attr = kzalloc(sizeof *attr, GFP_KERNEL);
957 	if (!attr)
958 		return -ENOMEM;
959 
960 	attr->qp_state = IB_QPS_INIT;
961 	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
962 	    IB_ACCESS_REMOTE_WRITE;
963 	attr->port_num = ch->sport->port;
964 	attr->pkey_index = 0;
965 
966 	ret = ib_modify_qp(qp, attr,
967 			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
968 			   IB_QP_PKEY_INDEX);
969 
970 	kfree(attr);
971 	return ret;
972 }
973 
974 /**
975  * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
976  * @ch: channel of the queue pair.
977  * @qp: queue pair to change the state of.
978  *
979  * Returns zero upon success and a negative value upon failure.
980  *
981  * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
982  * If this structure ever becomes larger, it might be necessary to allocate
983  * it dynamically instead of on the stack.
984  */
985 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
986 {
987 	struct ib_qp_attr qp_attr;
988 	int attr_mask;
989 	int ret;
990 
991 	qp_attr.qp_state = IB_QPS_RTR;
992 	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
993 	if (ret)
994 		goto out;
995 
996 	qp_attr.max_dest_rd_atomic = 4;
997 
998 	ret = ib_modify_qp(qp, &qp_attr, attr_mask);
999 
1000 out:
1001 	return ret;
1002 }
1003 
1004 /**
1005  * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
1006  * @ch: channel of the queue pair.
1007  * @qp: queue pair to change the state of.
1008  *
1009  * Returns zero upon success and a negative value upon failure.
1010  *
1011  * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
1012  * If this structure ever becomes larger, it might be necessary to allocate
1013  * it dynamically instead of on the stack.
1014  */
1015 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1016 {
1017 	struct ib_qp_attr qp_attr;
1018 	int attr_mask;
1019 	int ret;
1020 
1021 	qp_attr.qp_state = IB_QPS_RTS;
1022 	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
1023 	if (ret)
1024 		goto out;
1025 
1026 	qp_attr.max_rd_atomic = 4;
1027 
1028 	ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1029 
1030 out:
1031 	return ret;
1032 }
1033 
1034 /**
1035  * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
1036  */
1037 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1038 {
1039 	struct ib_qp_attr qp_attr;
1040 
1041 	qp_attr.qp_state = IB_QPS_ERR;
1042 	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
1043 }
1044 
1045 /**
1046  * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
1047  */
1048 static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1049 				    struct srpt_send_ioctx *ioctx)
1050 {
1051 	struct scatterlist *sg;
1052 	enum dma_data_direction dir;
1053 
1054 	BUG_ON(!ch);
1055 	BUG_ON(!ioctx);
1056 	BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
1057 
1058 	while (ioctx->n_rdma)
1059 		kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
1060 
1061 	kfree(ioctx->rdma_ius);
1062 	ioctx->rdma_ius = NULL;
1063 
1064 	if (ioctx->mapped_sg_count) {
1065 		sg = ioctx->sg;
1066 		WARN_ON(!sg);
1067 		dir = ioctx->cmd.data_direction;
1068 		BUG_ON(dir == DMA_NONE);
1069 		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
1070 				opposite_dma_dir(dir));
1071 		ioctx->mapped_sg_count = 0;
1072 	}
1073 }
1074 
1075 /**
1076  * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
1077  */
1078 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1079 				 struct srpt_send_ioctx *ioctx)
1080 {
1081 	struct se_cmd *cmd;
1082 	struct scatterlist *sg, *sg_orig;
1083 	int sg_cnt;
1084 	enum dma_data_direction dir;
1085 	struct rdma_iu *riu;
1086 	struct srp_direct_buf *db;
1087 	dma_addr_t dma_addr;
1088 	struct ib_sge *sge;
1089 	u64 raddr;
1090 	u32 rsize;
1091 	u32 tsize;
1092 	u32 dma_len;
1093 	int count, nrdma;
1094 	int i, j, k;
1095 
1096 	BUG_ON(!ch);
1097 	BUG_ON(!ioctx);
1098 	cmd = &ioctx->cmd;
1099 	dir = cmd->data_direction;
1100 	BUG_ON(dir == DMA_NONE);
1101 
1102 	ioctx->sg = sg = sg_orig = cmd->t_data_sg;
1103 	ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
1104 
1105 	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
1106 			      opposite_dma_dir(dir));
1107 	if (unlikely(!count))
1108 		return -EAGAIN;
1109 
1110 	ioctx->mapped_sg_count = count;
1111 
1112 	if (ioctx->rdma_ius && ioctx->n_rdma_ius)
1113 		nrdma = ioctx->n_rdma_ius;
1114 	else {
1115 		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
1116 			+ ioctx->n_rbuf;
1117 
1118 		ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
1119 		if (!ioctx->rdma_ius)
1120 			goto free_mem;
1121 
1122 		ioctx->n_rdma_ius = nrdma;
1123 	}
1124 
1125 	db = ioctx->rbufs;
1126 	tsize = cmd->data_length;
1127 	dma_len = sg_dma_len(&sg[0]);
1128 	riu = ioctx->rdma_ius;
1129 
1130 	/*
1131 	 * For each remote desc - calculate the #ib_sge.
1132 	 * If #ib_sge < SRPT_DEF_SG_PER_WQE per rdma operation then
1133 	 *      each remote desc rdma_iu is required a rdma wr;
1134 	 * else
1135 	 *      we need to allocate extra rdma_iu to carry extra #ib_sge in
1136 	 *      another rdma wr
1137 	 */
1138 	for (i = 0, j = 0;
1139 	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1140 		rsize = be32_to_cpu(db->len);
1141 		raddr = be64_to_cpu(db->va);
1142 		riu->raddr = raddr;
1143 		riu->rkey = be32_to_cpu(db->key);
1144 		riu->sge_cnt = 0;
1145 
1146 		/* calculate how many sge required for this remote_buf */
1147 		while (rsize > 0 && tsize > 0) {
1148 
1149 			if (rsize >= dma_len) {
1150 				tsize -= dma_len;
1151 				rsize -= dma_len;
1152 				raddr += dma_len;
1153 
1154 				if (tsize > 0) {
1155 					++j;
1156 					if (j < count) {
1157 						sg = sg_next(sg);
1158 						dma_len = sg_dma_len(sg);
1159 					}
1160 				}
1161 			} else {
1162 				tsize -= rsize;
1163 				dma_len -= rsize;
1164 				rsize = 0;
1165 			}
1166 
1167 			++riu->sge_cnt;
1168 
1169 			if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
1170 				++ioctx->n_rdma;
1171 				riu->sge =
1172 				    kmalloc(riu->sge_cnt * sizeof *riu->sge,
1173 					    GFP_KERNEL);
1174 				if (!riu->sge)
1175 					goto free_mem;
1176 
1177 				++riu;
1178 				riu->sge_cnt = 0;
1179 				riu->raddr = raddr;
1180 				riu->rkey = be32_to_cpu(db->key);
1181 			}
1182 		}
1183 
1184 		++ioctx->n_rdma;
1185 		riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
1186 				   GFP_KERNEL);
1187 		if (!riu->sge)
1188 			goto free_mem;
1189 	}
1190 
1191 	db = ioctx->rbufs;
1192 	tsize = cmd->data_length;
1193 	riu = ioctx->rdma_ius;
1194 	sg = sg_orig;
1195 	dma_len = sg_dma_len(&sg[0]);
1196 	dma_addr = sg_dma_address(&sg[0]);
1197 
1198 	/* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
1199 	for (i = 0, j = 0;
1200 	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1201 		rsize = be32_to_cpu(db->len);
1202 		sge = riu->sge;
1203 		k = 0;
1204 
1205 		while (rsize > 0 && tsize > 0) {
1206 			sge->addr = dma_addr;
1207 			sge->lkey = ch->sport->sdev->mr->lkey;
1208 
1209 			if (rsize >= dma_len) {
1210 				sge->length =
1211 					(tsize < dma_len) ? tsize : dma_len;
1212 				tsize -= dma_len;
1213 				rsize -= dma_len;
1214 
1215 				if (tsize > 0) {
1216 					++j;
1217 					if (j < count) {
1218 						sg = sg_next(sg);
1219 						dma_len = sg_dma_len(sg);
1220 						dma_addr = sg_dma_address(sg);
1221 					}
1222 				}
1223 			} else {
1224 				sge->length = (tsize < rsize) ? tsize : rsize;
1225 				tsize -= rsize;
1226 				dma_len -= rsize;
1227 				dma_addr += rsize;
1228 				rsize = 0;
1229 			}
1230 
1231 			++k;
1232 			if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
1233 				++riu;
1234 				sge = riu->sge;
1235 				k = 0;
1236 			} else if (rsize > 0 && tsize > 0)
1237 				++sge;
1238 		}
1239 	}
1240 
1241 	return 0;
1242 
1243 free_mem:
1244 	srpt_unmap_sg_to_ib_sge(ch, ioctx);
1245 
1246 	return -ENOMEM;
1247 }
1248 
1249 /**
1250  * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
1251  */
1252 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1253 {
1254 	struct srpt_send_ioctx *ioctx;
1255 	unsigned long flags;
1256 
1257 	BUG_ON(!ch);
1258 
1259 	ioctx = NULL;
1260 	spin_lock_irqsave(&ch->spinlock, flags);
1261 	if (!list_empty(&ch->free_list)) {
1262 		ioctx = list_first_entry(&ch->free_list,
1263 					 struct srpt_send_ioctx, free_list);
1264 		list_del(&ioctx->free_list);
1265 	}
1266 	spin_unlock_irqrestore(&ch->spinlock, flags);
1267 
1268 	if (!ioctx)
1269 		return ioctx;
1270 
1271 	BUG_ON(ioctx->ch != ch);
1272 	spin_lock_init(&ioctx->spinlock);
1273 	ioctx->state = SRPT_STATE_NEW;
1274 	ioctx->n_rbuf = 0;
1275 	ioctx->rbufs = NULL;
1276 	ioctx->n_rdma = 0;
1277 	ioctx->n_rdma_ius = 0;
1278 	ioctx->rdma_ius = NULL;
1279 	ioctx->mapped_sg_count = 0;
1280 	init_completion(&ioctx->tx_done);
1281 	ioctx->queue_status_only = false;
1282 	/*
1283 	 * transport_init_se_cmd() does not initialize all fields, so do it
1284 	 * here.
1285 	 */
1286 	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1287 	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1288 
1289 	return ioctx;
1290 }
1291 
1292 /**
1293  * srpt_abort_cmd() - Abort a SCSI command.
1294  * @ioctx:   I/O context associated with the SCSI command.
1295  * @context: Preferred execution context.
1296  */
1297 static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1298 {
1299 	enum srpt_command_state state;
1300 	unsigned long flags;
1301 
1302 	BUG_ON(!ioctx);
1303 
1304 	/*
1305 	 * If the command is in a state where the target core is waiting for
1306 	 * the ib_srpt driver, change the state to the next state. Changing
1307 	 * the state of the command from SRPT_STATE_NEED_DATA to
1308 	 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
1309 	 * function a second time.
1310 	 */
1311 
1312 	spin_lock_irqsave(&ioctx->spinlock, flags);
1313 	state = ioctx->state;
1314 	switch (state) {
1315 	case SRPT_STATE_NEED_DATA:
1316 		ioctx->state = SRPT_STATE_DATA_IN;
1317 		break;
1318 	case SRPT_STATE_DATA_IN:
1319 	case SRPT_STATE_CMD_RSP_SENT:
1320 	case SRPT_STATE_MGMT_RSP_SENT:
1321 		ioctx->state = SRPT_STATE_DONE;
1322 		break;
1323 	default:
1324 		break;
1325 	}
1326 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
1327 
1328 	if (state == SRPT_STATE_DONE) {
1329 		struct srpt_rdma_ch *ch = ioctx->ch;
1330 
1331 		BUG_ON(ch->sess == NULL);
1332 
1333 		target_put_sess_cmd(ch->sess, &ioctx->cmd);
1334 		goto out;
1335 	}
1336 
1337 	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
1338 		 ioctx->tag);
1339 
1340 	switch (state) {
1341 	case SRPT_STATE_NEW:
1342 	case SRPT_STATE_DATA_IN:
1343 	case SRPT_STATE_MGMT:
1344 		/*
1345 		 * Do nothing - defer abort processing until
1346 		 * srpt_queue_response() is invoked.
1347 		 */
1348 		WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
1349 		break;
1350 	case SRPT_STATE_NEED_DATA:
1351 		/* DMA_TO_DEVICE (write) - RDMA read error. */
1352 
1353 		/* XXX(hch): this is a horrible layering violation.. */
1354 		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
1355 		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
1356 		ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
1357 		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
1358 
1359 		complete(&ioctx->cmd.transport_lun_stop_comp);
1360 		break;
1361 	case SRPT_STATE_CMD_RSP_SENT:
1362 		/*
1363 		 * SRP_RSP sending failed or the SRP_RSP send completion has
1364 		 * not been received in time.
1365 		 */
1366 		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1367 		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
1368 		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
1369 		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
1370 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
1371 		break;
1372 	case SRPT_STATE_MGMT_RSP_SENT:
1373 		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1374 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
1375 		break;
1376 	default:
1377 		WARN_ON("ERROR: unexpected command state");
1378 		break;
1379 	}
1380 
1381 out:
1382 	return state;
1383 }
1384 
1385 /**
1386  * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
1387  */
1388 static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
1389 {
1390 	struct srpt_send_ioctx *ioctx;
1391 	enum srpt_command_state state;
1392 	struct se_cmd *cmd;
1393 	u32 index;
1394 
1395 	atomic_inc(&ch->sq_wr_avail);
1396 
1397 	index = idx_from_wr_id(wr_id);
1398 	ioctx = ch->ioctx_ring[index];
1399 	state = srpt_get_cmd_state(ioctx);
1400 	cmd = &ioctx->cmd;
1401 
1402 	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
1403 		&& state != SRPT_STATE_MGMT_RSP_SENT
1404 		&& state != SRPT_STATE_NEED_DATA
1405 		&& state != SRPT_STATE_DONE);
1406 
1407 	/* If SRP_RSP sending failed, undo the ch->req_lim change. */
1408 	if (state == SRPT_STATE_CMD_RSP_SENT
1409 	    || state == SRPT_STATE_MGMT_RSP_SENT)
1410 		atomic_dec(&ch->req_lim);
1411 
1412 	srpt_abort_cmd(ioctx);
1413 }
1414 
1415 /**
1416  * srpt_handle_send_comp() - Process an IB send completion notification.
1417  */
1418 static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
1419 				  struct srpt_send_ioctx *ioctx)
1420 {
1421 	enum srpt_command_state state;
1422 
1423 	atomic_inc(&ch->sq_wr_avail);
1424 
1425 	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1426 
1427 	if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
1428 		    && state != SRPT_STATE_MGMT_RSP_SENT
1429 		    && state != SRPT_STATE_DONE))
1430 		pr_debug("state = %d\n", state);
1431 
1432 	if (state != SRPT_STATE_DONE) {
1433 		srpt_unmap_sg_to_ib_sge(ch, ioctx);
1434 		transport_generic_free_cmd(&ioctx->cmd, 0);
1435 	} else {
1436 		printk(KERN_ERR "IB completion has been received too late for"
1437 		       " wr_id = %u.\n", ioctx->ioctx.index);
1438 	}
1439 }
1440 
1441 /**
1442  * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
1443  *
1444  * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
1445  * the data that has been transferred via IB RDMA had to be postponed until the
1446  * check_stop_free() callback.  None of this is necessary anymore and needs to
1447  * be cleaned up.
1448  */
1449 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
1450 				  struct srpt_send_ioctx *ioctx,
1451 				  enum srpt_opcode opcode)
1452 {
1453 	WARN_ON(ioctx->n_rdma <= 0);
1454 	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1455 
1456 	if (opcode == SRPT_RDMA_READ_LAST) {
1457 		if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1458 						SRPT_STATE_DATA_IN))
1459 			target_execute_cmd(&ioctx->cmd);
1460 		else
1461 			printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
1462 			       __LINE__, srpt_get_cmd_state(ioctx));
1463 	} else if (opcode == SRPT_RDMA_ABORT) {
1464 		ioctx->rdma_aborted = true;
1465 	} else {
1466 		WARN(true, "unexpected opcode %d\n", opcode);
1467 	}
1468 }
1469 
1470 /**
1471  * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
1472  */
1473 static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
1474 				      struct srpt_send_ioctx *ioctx,
1475 				      enum srpt_opcode opcode)
1476 {
1477 	struct se_cmd *cmd;
1478 	enum srpt_command_state state;
1479 	unsigned long flags;
1480 
1481 	cmd = &ioctx->cmd;
1482 	state = srpt_get_cmd_state(ioctx);
1483 	switch (opcode) {
1484 	case SRPT_RDMA_READ_LAST:
1485 		if (ioctx->n_rdma <= 0) {
1486 			printk(KERN_ERR "Received invalid RDMA read"
1487 			       " error completion with idx %d\n",
1488 			       ioctx->ioctx.index);
1489 			break;
1490 		}
1491 		atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1492 		if (state == SRPT_STATE_NEED_DATA)
1493 			srpt_abort_cmd(ioctx);
1494 		else
1495 			printk(KERN_ERR "%s[%d]: wrong state = %d\n",
1496 			       __func__, __LINE__, state);
1497 		break;
1498 	case SRPT_RDMA_WRITE_LAST:
1499 		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
1500 		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
1501 		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
1502 		break;
1503 	default:
1504 		printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
1505 		       __LINE__, opcode);
1506 		break;
1507 	}
1508 }
1509 
1510 /**
1511  * srpt_build_cmd_rsp() - Build an SRP_RSP response.
1512  * @ch: RDMA channel through which the request has been received.
1513  * @ioctx: I/O context associated with the SRP_CMD request. The response will
1514  *   be built in the buffer ioctx->buf points at and hence this function will
1515  *   overwrite the request data.
1516  * @tag: tag of the request for which this response is being generated.
1517  * @status: value for the STATUS field of the SRP_RSP information unit.
1518  *
1519  * Returns the size in bytes of the SRP_RSP response.
1520  *
1521  * An SRP_RSP response contains a SCSI status or service response. See also
1522  * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1523  * response. See also SPC-2 for more information about sense data.
1524  */
1525 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1526 			      struct srpt_send_ioctx *ioctx, u64 tag,
1527 			      int status)
1528 {
1529 	struct srp_rsp *srp_rsp;
1530 	const u8 *sense_data;
1531 	int sense_data_len, max_sense_len;
1532 
1533 	/*
1534 	 * The lowest bit of all SAM-3 status codes is zero (see also
1535 	 * paragraph 5.3 in SAM-3).
1536 	 */
1537 	WARN_ON(status & 1);
1538 
1539 	srp_rsp = ioctx->ioctx.buf;
1540 	BUG_ON(!srp_rsp);
1541 
1542 	sense_data = ioctx->sense_data;
1543 	sense_data_len = ioctx->cmd.scsi_sense_length;
1544 	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1545 
1546 	memset(srp_rsp, 0, sizeof *srp_rsp);
1547 	srp_rsp->opcode = SRP_RSP;
1548 	srp_rsp->req_lim_delta =
1549 		__constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1550 	srp_rsp->tag = tag;
1551 	srp_rsp->status = status;
1552 
1553 	if (sense_data_len) {
1554 		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
1555 		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1556 		if (sense_data_len > max_sense_len) {
1557 			printk(KERN_WARNING "truncated sense data from %d to %d"
1558 			       " bytes\n", sense_data_len, max_sense_len);
1559 			sense_data_len = max_sense_len;
1560 		}
1561 
1562 		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1563 		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1564 		memcpy(srp_rsp + 1, sense_data, sense_data_len);
1565 	}
1566 
1567 	return sizeof(*srp_rsp) + sense_data_len;
1568 }
1569 
1570 /**
1571  * srpt_build_tskmgmt_rsp() - Build a task management response.
1572  * @ch:       RDMA channel through which the request has been received.
1573  * @ioctx:    I/O context in which the SRP_RSP response will be built.
1574  * @rsp_code: RSP_CODE that will be stored in the response.
1575  * @tag:      Tag of the request for which this response is being generated.
1576  *
1577  * Returns the size in bytes of the SRP_RSP response.
1578  *
1579  * An SRP_RSP response contains a SCSI status or service response. See also
1580  * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1581  * response.
1582  */
1583 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1584 				  struct srpt_send_ioctx *ioctx,
1585 				  u8 rsp_code, u64 tag)
1586 {
1587 	struct srp_rsp *srp_rsp;
1588 	int resp_data_len;
1589 	int resp_len;
1590 
1591 	resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
1592 	resp_len = sizeof(*srp_rsp) + resp_data_len;
1593 
1594 	srp_rsp = ioctx->ioctx.buf;
1595 	BUG_ON(!srp_rsp);
1596 	memset(srp_rsp, 0, sizeof *srp_rsp);
1597 
1598 	srp_rsp->opcode = SRP_RSP;
1599 	srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
1600 				    + atomic_xchg(&ch->req_lim_delta, 0));
1601 	srp_rsp->tag = tag;
1602 
1603 	if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
1604 		srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1605 		srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1606 		srp_rsp->data[3] = rsp_code;
1607 	}
1608 
1609 	return resp_len;
1610 }
1611 
1612 #define NO_SUCH_LUN ((uint64_t)-1LL)
1613 
1614 /*
1615  * SCSI LUN addressing method. See also SAM-2 and the section about
1616  * eight byte LUNs.
1617  */
1618 enum scsi_lun_addr_method {
1619 	SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
1620 	SCSI_LUN_ADDR_METHOD_FLAT         = 1,
1621 	SCSI_LUN_ADDR_METHOD_LUN          = 2,
1622 	SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
1623 };
1624 
1625 /*
1626  * srpt_unpack_lun() - Convert from network LUN to linear LUN.
1627  *
1628  * Convert an 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
1629  * order (big endian) to a linear LUN. Supports three LUN addressing methods:
1630  * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
1631  */
1632 static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
1633 {
1634 	uint64_t res = NO_SUCH_LUN;
1635 	int addressing_method;
1636 
1637 	if (unlikely(len < 2)) {
1638 		printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
1639 		       "more", len);
1640 		goto out;
1641 	}
1642 
1643 	switch (len) {
1644 	case 8:
1645 		if ((*((__be64 *)lun) &
1646 		     __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1647 			goto out_err;
1648 		break;
1649 	case 4:
1650 		if (*((__be16 *)&lun[2]) != 0)
1651 			goto out_err;
1652 		break;
1653 	case 6:
1654 		if (*((__be32 *)&lun[2]) != 0)
1655 			goto out_err;
1656 		break;
1657 	case 2:
1658 		break;
1659 	default:
1660 		goto out_err;
1661 	}
1662 
1663 	addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
1664 	switch (addressing_method) {
1665 	case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
1666 	case SCSI_LUN_ADDR_METHOD_FLAT:
1667 	case SCSI_LUN_ADDR_METHOD_LUN:
1668 		res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1669 		break;
1670 
1671 	case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
1672 	default:
1673 		printk(KERN_ERR "Unimplemented LUN addressing method %u",
1674 		       addressing_method);
1675 		break;
1676 	}
1677 
1678 out:
1679 	return res;
1680 
1681 out_err:
1682 	printk(KERN_ERR "Support for multi-level LUNs has not yet been"
1683 	       " implemented");
1684 	goto out;
1685 }
1686 
1687 static int srpt_check_stop_free(struct se_cmd *cmd)
1688 {
1689 	struct srpt_send_ioctx *ioctx = container_of(cmd,
1690 				struct srpt_send_ioctx, cmd);
1691 
1692 	return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
1693 }
1694 
1695 /**
1696  * srpt_handle_cmd() - Process SRP_CMD.
1697  */
1698 static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1699 			   struct srpt_recv_ioctx *recv_ioctx,
1700 			   struct srpt_send_ioctx *send_ioctx)
1701 {
1702 	struct se_cmd *cmd;
1703 	struct srp_cmd *srp_cmd;
1704 	uint64_t unpacked_lun;
1705 	u64 data_len;
1706 	enum dma_data_direction dir;
1707 	sense_reason_t ret;
1708 	int rc;
1709 
1710 	BUG_ON(!send_ioctx);
1711 
1712 	srp_cmd = recv_ioctx->ioctx.buf;
1713 	cmd = &send_ioctx->cmd;
1714 	send_ioctx->tag = srp_cmd->tag;
1715 
1716 	switch (srp_cmd->task_attr) {
1717 	case SRP_CMD_SIMPLE_Q:
1718 		cmd->sam_task_attr = MSG_SIMPLE_TAG;
1719 		break;
1720 	case SRP_CMD_ORDERED_Q:
1721 	default:
1722 		cmd->sam_task_attr = MSG_ORDERED_TAG;
1723 		break;
1724 	case SRP_CMD_HEAD_OF_Q:
1725 		cmd->sam_task_attr = MSG_HEAD_TAG;
1726 		break;
1727 	case SRP_CMD_ACA:
1728 		cmd->sam_task_attr = MSG_ACA_TAG;
1729 		break;
1730 	}
1731 
1732 	if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
1733 		printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
1734 		       srp_cmd->tag);
1735 		ret = TCM_INVALID_CDB_FIELD;
1736 		goto send_sense;
1737 	}
1738 
1739 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
1740 				       sizeof(srp_cmd->lun));
1741 	rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
1742 			&send_ioctx->sense_data[0], unpacked_lun, data_len,
1743 			MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
1744 	if (rc != 0) {
1745 		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1746 		goto send_sense;
1747 	}
1748 	return 0;
1749 
1750 send_sense:
1751 	transport_send_check_condition_and_sense(cmd, ret, 0);
1752 	return -1;
1753 }
1754 
1755 /**
1756  * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
1757  * @ch: RDMA channel of the task management request.
1758  * @fn: Task management function to perform.
1759  * @req_tag: Tag of the SRP task management request.
1760  * @mgmt_ioctx: I/O context of the task management request.
1761  *
1762  * Returns zero if the target core will process the task management
1763  * request asynchronously.
1764  *
1765  * Note: It is assumed that the initiator serializes tag-based task management
1766  * requests.
1767  */
1768 static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
1769 {
1770 	struct srpt_device *sdev;
1771 	struct srpt_rdma_ch *ch;
1772 	struct srpt_send_ioctx *target;
1773 	int ret, i;
1774 
1775 	ret = -EINVAL;
1776 	ch = ioctx->ch;
1777 	BUG_ON(!ch);
1778 	BUG_ON(!ch->sport);
1779 	sdev = ch->sport->sdev;
1780 	BUG_ON(!sdev);
1781 	spin_lock_irq(&sdev->spinlock);
1782 	for (i = 0; i < ch->rq_size; ++i) {
1783 		target = ch->ioctx_ring[i];
1784 		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
1785 		    target->tag == tag &&
1786 		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
1787 			ret = 0;
1788 			/* now let the target core abort &target->cmd; */
1789 			break;
1790 		}
1791 	}
1792 	spin_unlock_irq(&sdev->spinlock);
1793 	return ret;
1794 }
1795 
1796 static int srp_tmr_to_tcm(int fn)
1797 {
1798 	switch (fn) {
1799 	case SRP_TSK_ABORT_TASK:
1800 		return TMR_ABORT_TASK;
1801 	case SRP_TSK_ABORT_TASK_SET:
1802 		return TMR_ABORT_TASK_SET;
1803 	case SRP_TSK_CLEAR_TASK_SET:
1804 		return TMR_CLEAR_TASK_SET;
1805 	case SRP_TSK_LUN_RESET:
1806 		return TMR_LUN_RESET;
1807 	case SRP_TSK_CLEAR_ACA:
1808 		return TMR_CLEAR_ACA;
1809 	default:
1810 		return -1;
1811 	}
1812 }
1813 
1814 /**
1815  * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
1816  *
1817  * Returns 0 if and only if the request will be processed by the target core.
1818  *
1819  * For more information about SRP_TSK_MGMT information units, see also section
1820  * 6.7 in the SRP r16a document.
1821  */
1822 static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1823 				 struct srpt_recv_ioctx *recv_ioctx,
1824 				 struct srpt_send_ioctx *send_ioctx)
1825 {
1826 	struct srp_tsk_mgmt *srp_tsk;
1827 	struct se_cmd *cmd;
1828 	struct se_session *sess = ch->sess;
1829 	uint64_t unpacked_lun;
1830 	uint32_t tag = 0;
1831 	int tcm_tmr;
1832 	int rc;
1833 
1834 	BUG_ON(!send_ioctx);
1835 
1836 	srp_tsk = recv_ioctx->ioctx.buf;
1837 	cmd = &send_ioctx->cmd;
1838 
1839 	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
1840 		 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
1841 		 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
1842 
1843 	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
1844 	send_ioctx->tag = srp_tsk->tag;
1845 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
1846 	if (tcm_tmr < 0) {
1847 		send_ioctx->cmd.se_tmr_req->response =
1848 			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
1849 		goto fail;
1850 	}
1851 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
1852 				       sizeof(srp_tsk->lun));
1853 
1854 	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
1855 		rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
1856 		if (rc < 0) {
1857 			send_ioctx->cmd.se_tmr_req->response =
1858 					TMR_TASK_DOES_NOT_EXIST;
1859 			goto fail;
1860 		}
1861 		tag = srp_tsk->task_tag;
1862 	}
1863 	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
1864 				srp_tsk, tcm_tmr, GFP_KERNEL, tag,
1865 				TARGET_SCF_ACK_KREF);
1866 	if (rc != 0) {
1867 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
1868 		goto fail;
1869 	}
1870 	return;
1871 fail:
1872 	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
1873 }
1874 
1875 /**
1876  * srpt_handle_new_iu() - Process a newly received information unit.
1877  * @ch:    RDMA channel through which the information unit has been received.
1878  * @ioctx: SRPT I/O context associated with the information unit.
1879  */
1880 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1881 			       struct srpt_recv_ioctx *recv_ioctx,
1882 			       struct srpt_send_ioctx *send_ioctx)
1883 {
1884 	struct srp_cmd *srp_cmd;
1885 	enum rdma_ch_state ch_state;
1886 
1887 	BUG_ON(!ch);
1888 	BUG_ON(!recv_ioctx);
1889 
1890 	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1891 				   recv_ioctx->ioctx.dma, srp_max_req_size,
1892 				   DMA_FROM_DEVICE);
1893 
1894 	ch_state = srpt_get_ch_state(ch);
1895 	if (unlikely(ch_state == CH_CONNECTING)) {
1896 		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1897 		goto out;
1898 	}
1899 
1900 	if (unlikely(ch_state != CH_LIVE))
1901 		goto out;
1902 
1903 	srp_cmd = recv_ioctx->ioctx.buf;
1904 	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
1905 		if (!send_ioctx)
1906 			send_ioctx = srpt_get_send_ioctx(ch);
1907 		if (unlikely(!send_ioctx)) {
1908 			list_add_tail(&recv_ioctx->wait_list,
1909 				      &ch->cmd_wait_list);
1910 			goto out;
1911 		}
1912 	}
1913 
1914 	switch (srp_cmd->opcode) {
1915 	case SRP_CMD:
1916 		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1917 		break;
1918 	case SRP_TSK_MGMT:
1919 		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1920 		break;
1921 	case SRP_I_LOGOUT:
1922 		printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
1923 		break;
1924 	case SRP_CRED_RSP:
1925 		pr_debug("received SRP_CRED_RSP\n");
1926 		break;
1927 	case SRP_AER_RSP:
1928 		pr_debug("received SRP_AER_RSP\n");
1929 		break;
1930 	case SRP_RSP:
1931 		printk(KERN_ERR "Received SRP_RSP\n");
1932 		break;
1933 	default:
1934 		printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
1935 		       srp_cmd->opcode);
1936 		break;
1937 	}
1938 
1939 	srpt_post_recv(ch->sport->sdev, recv_ioctx);
1940 out:
1941 	return;
1942 }
1943 
1944 static void srpt_process_rcv_completion(struct ib_cq *cq,
1945 					struct srpt_rdma_ch *ch,
1946 					struct ib_wc *wc)
1947 {
1948 	struct srpt_device *sdev = ch->sport->sdev;
1949 	struct srpt_recv_ioctx *ioctx;
1950 	u32 index;
1951 
1952 	index = idx_from_wr_id(wc->wr_id);
1953 	if (wc->status == IB_WC_SUCCESS) {
1954 		int req_lim;
1955 
1956 		req_lim = atomic_dec_return(&ch->req_lim);
1957 		if (unlikely(req_lim < 0))
1958 			printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
1959 		ioctx = sdev->ioctx_ring[index];
1960 		srpt_handle_new_iu(ch, ioctx, NULL);
1961 	} else {
1962 		printk(KERN_INFO "receiving failed for idx %u with status %d\n",
1963 		       index, wc->status);
1964 	}
1965 }
1966 
1967 /**
1968  * srpt_process_send_completion() - Process an IB send completion.
1969  *
1970  * Note: Although this has not yet been observed during tests, at least in
1971  * theory it is possible that the srpt_get_send_ioctx() call invoked by
1972  * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
1973  * value in each response is set to one, and it is possible that this response
1974  * makes the initiator send a new request before the send completion for that
1975  * response has been processed. This could e.g. happen if the call to
1976  * srpt_put_send_iotcx() is delayed because of a higher priority interrupt or
1977  * if IB retransmission causes generation of the send completion to be
1978  * delayed. Incoming information units for which srpt_get_send_ioctx() fails
1979  * are queued on cmd_wait_list. The code below processes these delayed
1980  * requests one at a time.
1981  */
1982 static void srpt_process_send_completion(struct ib_cq *cq,
1983 					 struct srpt_rdma_ch *ch,
1984 					 struct ib_wc *wc)
1985 {
1986 	struct srpt_send_ioctx *send_ioctx;
1987 	uint32_t index;
1988 	enum srpt_opcode opcode;
1989 
1990 	index = idx_from_wr_id(wc->wr_id);
1991 	opcode = opcode_from_wr_id(wc->wr_id);
1992 	send_ioctx = ch->ioctx_ring[index];
1993 	if (wc->status == IB_WC_SUCCESS) {
1994 		if (opcode == SRPT_SEND)
1995 			srpt_handle_send_comp(ch, send_ioctx);
1996 		else {
1997 			WARN_ON(opcode != SRPT_RDMA_ABORT &&
1998 				wc->opcode != IB_WC_RDMA_READ);
1999 			srpt_handle_rdma_comp(ch, send_ioctx, opcode);
2000 		}
2001 	} else {
2002 		if (opcode == SRPT_SEND) {
2003 			printk(KERN_INFO "sending response for idx %u failed"
2004 			       " with status %d\n", index, wc->status);
2005 			srpt_handle_send_err_comp(ch, wc->wr_id);
2006 		} else if (opcode != SRPT_RDMA_MID) {
2007 			printk(KERN_INFO "RDMA t %d for idx %u failed with"
2008 				" status %d", opcode, index, wc->status);
2009 			srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
2010 		}
2011 	}
2012 
2013 	while (unlikely(opcode == SRPT_SEND
2014 			&& !list_empty(&ch->cmd_wait_list)
2015 			&& srpt_get_ch_state(ch) == CH_LIVE
2016 			&& (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
2017 		struct srpt_recv_ioctx *recv_ioctx;
2018 
2019 		recv_ioctx = list_first_entry(&ch->cmd_wait_list,
2020 					      struct srpt_recv_ioctx,
2021 					      wait_list);
2022 		list_del(&recv_ioctx->wait_list);
2023 		srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
2024 	}
2025 }
2026 
2027 static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
2028 {
2029 	struct ib_wc *const wc = ch->wc;
2030 	int i, n;
2031 
2032 	WARN_ON(cq != ch->cq);
2033 
2034 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2035 	while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
2036 		for (i = 0; i < n; i++) {
2037 			if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
2038 				srpt_process_rcv_completion(cq, ch, &wc[i]);
2039 			else
2040 				srpt_process_send_completion(cq, ch, &wc[i]);
2041 		}
2042 	}
2043 }
2044 
2045 /**
2046  * srpt_completion() - IB completion queue callback function.
2047  *
2048  * Notes:
2049  * - It is guaranteed that a completion handler will never be invoked
2050  *   concurrently on two different CPUs for the same completion queue. See also
2051  *   Documentation/infiniband/core_locking.txt and the implementation of
2052  *   handle_edge_irq() in kernel/irq/chip.c.
2053  * - When threaded IRQs are enabled, completion handlers are invoked in thread
2054  *   context instead of interrupt context.
2055  */
2056 static void srpt_completion(struct ib_cq *cq, void *ctx)
2057 {
2058 	struct srpt_rdma_ch *ch = ctx;
2059 
2060 	wake_up_interruptible(&ch->wait_queue);
2061 }
2062 
2063 static int srpt_compl_thread(void *arg)
2064 {
2065 	struct srpt_rdma_ch *ch;
2066 
2067 	/* Hibernation / freezing of the SRPT kernel thread is not supported. */
2068 	current->flags |= PF_NOFREEZE;
2069 
2070 	ch = arg;
2071 	BUG_ON(!ch);
2072 	printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
2073 	       ch->sess_name, ch->thread->comm, current->pid);
2074 	while (!kthread_should_stop()) {
2075 		wait_event_interruptible(ch->wait_queue,
2076 			(srpt_process_completion(ch->cq, ch),
2077 			 kthread_should_stop()));
2078 	}
2079 	printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
2080 	       ch->sess_name, ch->thread->comm, current->pid);
2081 	return 0;
2082 }
2083 
2084 /**
2085  * srpt_create_ch_ib() - Create receive and send completion queues.
2086  */
2087 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2088 {
2089 	struct ib_qp_init_attr *qp_init;
2090 	struct srpt_port *sport = ch->sport;
2091 	struct srpt_device *sdev = sport->sdev;
2092 	u32 srp_sq_size = sport->port_attrib.srp_sq_size;
2093 	int ret;
2094 
2095 	WARN_ON(ch->rq_size < 1);
2096 
2097 	ret = -ENOMEM;
2098 	qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
2099 	if (!qp_init)
2100 		goto out;
2101 
2102 	ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
2103 			      ch->rq_size + srp_sq_size, 0);
2104 	if (IS_ERR(ch->cq)) {
2105 		ret = PTR_ERR(ch->cq);
2106 		printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
2107 		       ch->rq_size + srp_sq_size, ret);
2108 		goto out;
2109 	}
2110 
2111 	qp_init->qp_context = (void *)ch;
2112 	qp_init->event_handler
2113 		= (void(*)(struct ib_event *, void*))srpt_qp_event;
2114 	qp_init->send_cq = ch->cq;
2115 	qp_init->recv_cq = ch->cq;
2116 	qp_init->srq = sdev->srq;
2117 	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
2118 	qp_init->qp_type = IB_QPT_RC;
2119 	qp_init->cap.max_send_wr = srp_sq_size;
2120 	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
2121 
2122 	ch->qp = ib_create_qp(sdev->pd, qp_init);
2123 	if (IS_ERR(ch->qp)) {
2124 		ret = PTR_ERR(ch->qp);
2125 		printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
2126 		goto err_destroy_cq;
2127 	}
2128 
2129 	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
2130 
2131 	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
2132 		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
2133 		 qp_init->cap.max_send_wr, ch->cm_id);
2134 
2135 	ret = srpt_init_ch_qp(ch, ch->qp);
2136 	if (ret)
2137 		goto err_destroy_qp;
2138 
2139 	init_waitqueue_head(&ch->wait_queue);
2140 
2141 	pr_debug("creating thread for session %s\n", ch->sess_name);
2142 
2143 	ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
2144 	if (IS_ERR(ch->thread)) {
2145 		printk(KERN_ERR "failed to create kernel thread %ld\n",
2146 		       PTR_ERR(ch->thread));
2147 		ch->thread = NULL;
2148 		goto err_destroy_qp;
2149 	}
2150 
2151 out:
2152 	kfree(qp_init);
2153 	return ret;
2154 
2155 err_destroy_qp:
2156 	ib_destroy_qp(ch->qp);
2157 err_destroy_cq:
2158 	ib_destroy_cq(ch->cq);
2159 	goto out;
2160 }
2161 
2162 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
2163 {
2164 	if (ch->thread)
2165 		kthread_stop(ch->thread);
2166 
2167 	ib_destroy_qp(ch->qp);
2168 	ib_destroy_cq(ch->cq);
2169 }
2170 
2171 /**
2172  * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
2173  *
2174  * Reset the QP and make sure all resources associated with the channel will
2175  * be deallocated at an appropriate time.
2176  *
2177  * Note: The caller must hold ch->sport->sdev->spinlock.
2178  */
2179 static void __srpt_close_ch(struct srpt_rdma_ch *ch)
2180 {
2181 	struct srpt_device *sdev;
2182 	enum rdma_ch_state prev_state;
2183 	unsigned long flags;
2184 
2185 	sdev = ch->sport->sdev;
2186 
2187 	spin_lock_irqsave(&ch->spinlock, flags);
2188 	prev_state = ch->state;
2189 	switch (prev_state) {
2190 	case CH_CONNECTING:
2191 	case CH_LIVE:
2192 		ch->state = CH_DISCONNECTING;
2193 		break;
2194 	default:
2195 		break;
2196 	}
2197 	spin_unlock_irqrestore(&ch->spinlock, flags);
2198 
2199 	switch (prev_state) {
2200 	case CH_CONNECTING:
2201 		ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
2202 			       NULL, 0);
2203 		/* fall through */
2204 	case CH_LIVE:
2205 		if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
2206 			printk(KERN_ERR "sending CM DREQ failed.\n");
2207 		break;
2208 	case CH_DISCONNECTING:
2209 		break;
2210 	case CH_DRAINING:
2211 	case CH_RELEASING:
2212 		break;
2213 	}
2214 }
2215 
2216 /**
2217  * srpt_close_ch() - Close an RDMA channel.
2218  */
2219 static void srpt_close_ch(struct srpt_rdma_ch *ch)
2220 {
2221 	struct srpt_device *sdev;
2222 
2223 	sdev = ch->sport->sdev;
2224 	spin_lock_irq(&sdev->spinlock);
2225 	__srpt_close_ch(ch);
2226 	spin_unlock_irq(&sdev->spinlock);
2227 }
2228 
2229 /**
2230  * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
2231  * @cm_id: Pointer to the CM ID of the channel to be drained.
2232  *
2233  * Note: Must be called from inside srpt_cm_handler to avoid a race between
2234  * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
2235  * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
2236  * waits until all target sessions for the associated IB device have been
2237  * unregistered and target session registration involves a call to
2238  * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
2239  * this function has finished).
2240  */
2241 static void srpt_drain_channel(struct ib_cm_id *cm_id)
2242 {
2243 	struct srpt_device *sdev;
2244 	struct srpt_rdma_ch *ch;
2245 	int ret;
2246 	bool do_reset = false;
2247 
2248 	WARN_ON_ONCE(irqs_disabled());
2249 
2250 	sdev = cm_id->context;
2251 	BUG_ON(!sdev);
2252 	spin_lock_irq(&sdev->spinlock);
2253 	list_for_each_entry(ch, &sdev->rch_list, list) {
2254 		if (ch->cm_id == cm_id) {
2255 			do_reset = srpt_test_and_set_ch_state(ch,
2256 					CH_CONNECTING, CH_DRAINING) ||
2257 				   srpt_test_and_set_ch_state(ch,
2258 					CH_LIVE, CH_DRAINING) ||
2259 				   srpt_test_and_set_ch_state(ch,
2260 					CH_DISCONNECTING, CH_DRAINING);
2261 			break;
2262 		}
2263 	}
2264 	spin_unlock_irq(&sdev->spinlock);
2265 
2266 	if (do_reset) {
2267 		ret = srpt_ch_qp_err(ch);
2268 		if (ret < 0)
2269 			printk(KERN_ERR "Setting queue pair in error state"
2270 			       " failed: %d\n", ret);
2271 	}
2272 }
2273 
2274 /**
2275  * srpt_find_channel() - Look up an RDMA channel.
2276  * @cm_id: Pointer to the CM ID of the channel to be looked up.
2277  *
2278  * Return NULL if no matching RDMA channel has been found.
2279  */
2280 static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
2281 					      struct ib_cm_id *cm_id)
2282 {
2283 	struct srpt_rdma_ch *ch;
2284 	bool found;
2285 
2286 	WARN_ON_ONCE(irqs_disabled());
2287 	BUG_ON(!sdev);
2288 
2289 	found = false;
2290 	spin_lock_irq(&sdev->spinlock);
2291 	list_for_each_entry(ch, &sdev->rch_list, list) {
2292 		if (ch->cm_id == cm_id) {
2293 			found = true;
2294 			break;
2295 		}
2296 	}
2297 	spin_unlock_irq(&sdev->spinlock);
2298 
2299 	return found ? ch : NULL;
2300 }
2301 
2302 /**
2303  * srpt_release_channel() - Release channel resources.
2304  *
2305  * Schedules the actual release because:
2306  * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
2307  *   trigger a deadlock.
2308  * - It is not safe to call TCM transport_* functions from interrupt context.
2309  */
2310 static void srpt_release_channel(struct srpt_rdma_ch *ch)
2311 {
2312 	schedule_work(&ch->release_work);
2313 }
2314 
2315 static void srpt_release_channel_work(struct work_struct *w)
2316 {
2317 	struct srpt_rdma_ch *ch;
2318 	struct srpt_device *sdev;
2319 	struct se_session *se_sess;
2320 
2321 	ch = container_of(w, struct srpt_rdma_ch, release_work);
2322 	pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
2323 		 ch->release_done);
2324 
2325 	sdev = ch->sport->sdev;
2326 	BUG_ON(!sdev);
2327 
2328 	se_sess = ch->sess;
2329 	BUG_ON(!se_sess);
2330 
2331 	target_wait_for_sess_cmds(se_sess, 0);
2332 
2333 	transport_deregister_session_configfs(se_sess);
2334 	transport_deregister_session(se_sess);
2335 	ch->sess = NULL;
2336 
2337 	srpt_destroy_ch_ib(ch);
2338 
2339 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2340 			     ch->sport->sdev, ch->rq_size,
2341 			     ch->rsp_size, DMA_TO_DEVICE);
2342 
2343 	spin_lock_irq(&sdev->spinlock);
2344 	list_del(&ch->list);
2345 	spin_unlock_irq(&sdev->spinlock);
2346 
2347 	ib_destroy_cm_id(ch->cm_id);
2348 
2349 	if (ch->release_done)
2350 		complete(ch->release_done);
2351 
2352 	wake_up(&sdev->ch_releaseQ);
2353 
2354 	kfree(ch);
2355 }
2356 
2357 static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
2358 					       u8 i_port_id[16])
2359 {
2360 	struct srpt_node_acl *nacl;
2361 
2362 	list_for_each_entry(nacl, &sport->port_acl_list, list)
2363 		if (memcmp(nacl->i_port_id, i_port_id,
2364 			   sizeof(nacl->i_port_id)) == 0)
2365 			return nacl;
2366 
2367 	return NULL;
2368 }
2369 
2370 static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
2371 					     u8 i_port_id[16])
2372 {
2373 	struct srpt_node_acl *nacl;
2374 
2375 	spin_lock_irq(&sport->port_acl_lock);
2376 	nacl = __srpt_lookup_acl(sport, i_port_id);
2377 	spin_unlock_irq(&sport->port_acl_lock);
2378 
2379 	return nacl;
2380 }
2381 
2382 /**
2383  * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
2384  *
2385  * Ownership of the cm_id is transferred to the target session if this
2386  * functions returns zero. Otherwise the caller remains the owner of cm_id.
2387  */
2388 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2389 			    struct ib_cm_req_event_param *param,
2390 			    void *private_data)
2391 {
2392 	struct srpt_device *sdev = cm_id->context;
2393 	struct srpt_port *sport = &sdev->port[param->port - 1];
2394 	struct srp_login_req *req;
2395 	struct srp_login_rsp *rsp;
2396 	struct srp_login_rej *rej;
2397 	struct ib_cm_rep_param *rep_param;
2398 	struct srpt_rdma_ch *ch, *tmp_ch;
2399 	struct srpt_node_acl *nacl;
2400 	u32 it_iu_len;
2401 	int i;
2402 	int ret = 0;
2403 
2404 	WARN_ON_ONCE(irqs_disabled());
2405 
2406 	if (WARN_ON(!sdev || !private_data))
2407 		return -EINVAL;
2408 
2409 	req = (struct srp_login_req *)private_data;
2410 
2411 	it_iu_len = be32_to_cpu(req->req_it_iu_len);
2412 
2413 	printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
2414 	       " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
2415 	       " (guid=0x%llx:0x%llx)\n",
2416 	       be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
2417 	       be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
2418 	       be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
2419 	       be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
2420 	       it_iu_len,
2421 	       param->port,
2422 	       be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
2423 	       be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
2424 
2425 	rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
2426 	rej = kzalloc(sizeof *rej, GFP_KERNEL);
2427 	rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
2428 
2429 	if (!rsp || !rej || !rep_param) {
2430 		ret = -ENOMEM;
2431 		goto out;
2432 	}
2433 
2434 	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2435 		rej->reason = __constant_cpu_to_be32(
2436 				SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2437 		ret = -EINVAL;
2438 		printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
2439 		       " length (%d bytes) is out of range (%d .. %d)\n",
2440 		       it_iu_len, 64, srp_max_req_size);
2441 		goto reject;
2442 	}
2443 
2444 	if (!sport->enabled) {
2445 		rej->reason = __constant_cpu_to_be32(
2446 			     SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2447 		ret = -EINVAL;
2448 		printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
2449 		       " has not yet been enabled\n");
2450 		goto reject;
2451 	}
2452 
2453 	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2454 		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2455 
2456 		spin_lock_irq(&sdev->spinlock);
2457 
2458 		list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2459 			if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2460 			    && !memcmp(ch->t_port_id, req->target_port_id, 16)
2461 			    && param->port == ch->sport->port
2462 			    && param->listen_id == ch->sport->sdev->cm_id
2463 			    && ch->cm_id) {
2464 				enum rdma_ch_state ch_state;
2465 
2466 				ch_state = srpt_get_ch_state(ch);
2467 				if (ch_state != CH_CONNECTING
2468 				    && ch_state != CH_LIVE)
2469 					continue;
2470 
2471 				/* found an existing channel */
2472 				pr_debug("Found existing channel %s"
2473 					 " cm_id= %p state= %d\n",
2474 					 ch->sess_name, ch->cm_id, ch_state);
2475 
2476 				__srpt_close_ch(ch);
2477 
2478 				rsp->rsp_flags =
2479 					SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2480 			}
2481 		}
2482 
2483 		spin_unlock_irq(&sdev->spinlock);
2484 
2485 	} else
2486 		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2487 
2488 	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2489 	    || *(__be64 *)(req->target_port_id + 8) !=
2490 	       cpu_to_be64(srpt_service_guid)) {
2491 		rej->reason = __constant_cpu_to_be32(
2492 				SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2493 		ret = -ENOMEM;
2494 		printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
2495 		       " has an invalid target port identifier.\n");
2496 		goto reject;
2497 	}
2498 
2499 	ch = kzalloc(sizeof *ch, GFP_KERNEL);
2500 	if (!ch) {
2501 		rej->reason = __constant_cpu_to_be32(
2502 					SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2503 		printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
2504 		ret = -ENOMEM;
2505 		goto reject;
2506 	}
2507 
2508 	INIT_WORK(&ch->release_work, srpt_release_channel_work);
2509 	memcpy(ch->i_port_id, req->initiator_port_id, 16);
2510 	memcpy(ch->t_port_id, req->target_port_id, 16);
2511 	ch->sport = &sdev->port[param->port - 1];
2512 	ch->cm_id = cm_id;
2513 	/*
2514 	 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
2515 	 * for the SRP protocol to the command queue size.
2516 	 */
2517 	ch->rq_size = SRPT_RQ_SIZE;
2518 	spin_lock_init(&ch->spinlock);
2519 	ch->state = CH_CONNECTING;
2520 	INIT_LIST_HEAD(&ch->cmd_wait_list);
2521 	ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2522 
2523 	ch->ioctx_ring = (struct srpt_send_ioctx **)
2524 		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2525 				      sizeof(*ch->ioctx_ring[0]),
2526 				      ch->rsp_size, DMA_TO_DEVICE);
2527 	if (!ch->ioctx_ring)
2528 		goto free_ch;
2529 
2530 	INIT_LIST_HEAD(&ch->free_list);
2531 	for (i = 0; i < ch->rq_size; i++) {
2532 		ch->ioctx_ring[i]->ch = ch;
2533 		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2534 	}
2535 
2536 	ret = srpt_create_ch_ib(ch);
2537 	if (ret) {
2538 		rej->reason = __constant_cpu_to_be32(
2539 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2540 		printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
2541 		       " a new RDMA channel failed.\n");
2542 		goto free_ring;
2543 	}
2544 
2545 	ret = srpt_ch_qp_rtr(ch, ch->qp);
2546 	if (ret) {
2547 		rej->reason = __constant_cpu_to_be32(
2548 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2549 		printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
2550 		       " RTR failed (error code = %d)\n", ret);
2551 		goto destroy_ib;
2552 	}
2553 	/*
2554 	 * Use the initator port identifier as the session name.
2555 	 */
2556 	snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2557 			be64_to_cpu(*(__be64 *)ch->i_port_id),
2558 			be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2559 
2560 	pr_debug("registering session %s\n", ch->sess_name);
2561 
2562 	nacl = srpt_lookup_acl(sport, ch->i_port_id);
2563 	if (!nacl) {
2564 		printk(KERN_INFO "Rejected login because no ACL has been"
2565 		       " configured yet for initiator %s.\n", ch->sess_name);
2566 		rej->reason = __constant_cpu_to_be32(
2567 				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2568 		goto destroy_ib;
2569 	}
2570 
2571 	ch->sess = transport_init_session();
2572 	if (IS_ERR(ch->sess)) {
2573 		rej->reason = __constant_cpu_to_be32(
2574 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2575 		pr_debug("Failed to create session\n");
2576 		goto deregister_session;
2577 	}
2578 	ch->sess->se_node_acl = &nacl->nacl;
2579 	transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
2580 
2581 	pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2582 		 ch->sess_name, ch->cm_id);
2583 
2584 	/* create srp_login_response */
2585 	rsp->opcode = SRP_LOGIN_RSP;
2586 	rsp->tag = req->tag;
2587 	rsp->max_it_iu_len = req->req_it_iu_len;
2588 	rsp->max_ti_iu_len = req->req_it_iu_len;
2589 	ch->max_ti_iu_len = it_iu_len;
2590 	rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2591 					      | SRP_BUF_FORMAT_INDIRECT);
2592 	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2593 	atomic_set(&ch->req_lim, ch->rq_size);
2594 	atomic_set(&ch->req_lim_delta, 0);
2595 
2596 	/* create cm reply */
2597 	rep_param->qp_num = ch->qp->qp_num;
2598 	rep_param->private_data = (void *)rsp;
2599 	rep_param->private_data_len = sizeof *rsp;
2600 	rep_param->rnr_retry_count = 7;
2601 	rep_param->flow_control = 1;
2602 	rep_param->failover_accepted = 0;
2603 	rep_param->srq = 1;
2604 	rep_param->responder_resources = 4;
2605 	rep_param->initiator_depth = 4;
2606 
2607 	ret = ib_send_cm_rep(cm_id, rep_param);
2608 	if (ret) {
2609 		printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
2610 		       " (error code = %d)\n", ret);
2611 		goto release_channel;
2612 	}
2613 
2614 	spin_lock_irq(&sdev->spinlock);
2615 	list_add_tail(&ch->list, &sdev->rch_list);
2616 	spin_unlock_irq(&sdev->spinlock);
2617 
2618 	goto out;
2619 
2620 release_channel:
2621 	srpt_set_ch_state(ch, CH_RELEASING);
2622 	transport_deregister_session_configfs(ch->sess);
2623 
2624 deregister_session:
2625 	transport_deregister_session(ch->sess);
2626 	ch->sess = NULL;
2627 
2628 destroy_ib:
2629 	srpt_destroy_ch_ib(ch);
2630 
2631 free_ring:
2632 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2633 			     ch->sport->sdev, ch->rq_size,
2634 			     ch->rsp_size, DMA_TO_DEVICE);
2635 free_ch:
2636 	kfree(ch);
2637 
2638 reject:
2639 	rej->opcode = SRP_LOGIN_REJ;
2640 	rej->tag = req->tag;
2641 	rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2642 					      | SRP_BUF_FORMAT_INDIRECT);
2643 
2644 	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2645 			     (void *)rej, sizeof *rej);
2646 
2647 out:
2648 	kfree(rep_param);
2649 	kfree(rsp);
2650 	kfree(rej);
2651 
2652 	return ret;
2653 }
2654 
2655 static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
2656 {
2657 	printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
2658 	srpt_drain_channel(cm_id);
2659 }
2660 
2661 /**
2662  * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
2663  *
2664  * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2665  * and that the recipient may begin transmitting (RTU = ready to use).
2666  */
2667 static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
2668 {
2669 	struct srpt_rdma_ch *ch;
2670 	int ret;
2671 
2672 	ch = srpt_find_channel(cm_id->context, cm_id);
2673 	BUG_ON(!ch);
2674 
2675 	if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
2676 		struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
2677 
2678 		ret = srpt_ch_qp_rts(ch, ch->qp);
2679 
2680 		list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
2681 					 wait_list) {
2682 			list_del(&ioctx->wait_list);
2683 			srpt_handle_new_iu(ch, ioctx, NULL);
2684 		}
2685 		if (ret)
2686 			srpt_close_ch(ch);
2687 	}
2688 }
2689 
2690 static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
2691 {
2692 	printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
2693 	srpt_drain_channel(cm_id);
2694 }
2695 
2696 static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
2697 {
2698 	printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
2699 	srpt_drain_channel(cm_id);
2700 }
2701 
2702 /**
2703  * srpt_cm_dreq_recv() - Process reception of a DREQ message.
2704  */
2705 static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
2706 {
2707 	struct srpt_rdma_ch *ch;
2708 	unsigned long flags;
2709 	bool send_drep = false;
2710 
2711 	ch = srpt_find_channel(cm_id->context, cm_id);
2712 	BUG_ON(!ch);
2713 
2714 	pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
2715 
2716 	spin_lock_irqsave(&ch->spinlock, flags);
2717 	switch (ch->state) {
2718 	case CH_CONNECTING:
2719 	case CH_LIVE:
2720 		send_drep = true;
2721 		ch->state = CH_DISCONNECTING;
2722 		break;
2723 	case CH_DISCONNECTING:
2724 	case CH_DRAINING:
2725 	case CH_RELEASING:
2726 		WARN(true, "unexpected channel state %d\n", ch->state);
2727 		break;
2728 	}
2729 	spin_unlock_irqrestore(&ch->spinlock, flags);
2730 
2731 	if (send_drep) {
2732 		if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
2733 			printk(KERN_ERR "Sending IB DREP failed.\n");
2734 		printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
2735 		       ch->sess_name);
2736 	}
2737 }
2738 
2739 /**
2740  * srpt_cm_drep_recv() - Process reception of a DREP message.
2741  */
2742 static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
2743 {
2744 	printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
2745 	       cm_id);
2746 	srpt_drain_channel(cm_id);
2747 }
2748 
2749 /**
2750  * srpt_cm_handler() - IB connection manager callback function.
2751  *
2752  * A non-zero return value will cause the caller destroy the CM ID.
2753  *
2754  * Note: srpt_cm_handler() must only return a non-zero value when transferring
2755  * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
2756  * a non-zero value in any other case will trigger a race with the
2757  * ib_destroy_cm_id() call in srpt_release_channel().
2758  */
2759 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2760 {
2761 	int ret;
2762 
2763 	ret = 0;
2764 	switch (event->event) {
2765 	case IB_CM_REQ_RECEIVED:
2766 		ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2767 				       event->private_data);
2768 		break;
2769 	case IB_CM_REJ_RECEIVED:
2770 		srpt_cm_rej_recv(cm_id);
2771 		break;
2772 	case IB_CM_RTU_RECEIVED:
2773 	case IB_CM_USER_ESTABLISHED:
2774 		srpt_cm_rtu_recv(cm_id);
2775 		break;
2776 	case IB_CM_DREQ_RECEIVED:
2777 		srpt_cm_dreq_recv(cm_id);
2778 		break;
2779 	case IB_CM_DREP_RECEIVED:
2780 		srpt_cm_drep_recv(cm_id);
2781 		break;
2782 	case IB_CM_TIMEWAIT_EXIT:
2783 		srpt_cm_timewait_exit(cm_id);
2784 		break;
2785 	case IB_CM_REP_ERROR:
2786 		srpt_cm_rep_error(cm_id);
2787 		break;
2788 	case IB_CM_DREQ_ERROR:
2789 		printk(KERN_INFO "Received IB DREQ ERROR event.\n");
2790 		break;
2791 	case IB_CM_MRA_RECEIVED:
2792 		printk(KERN_INFO "Received IB MRA event\n");
2793 		break;
2794 	default:
2795 		printk(KERN_ERR "received unrecognized IB CM event %d\n",
2796 		       event->event);
2797 		break;
2798 	}
2799 
2800 	return ret;
2801 }
2802 
2803 /**
2804  * srpt_perform_rdmas() - Perform IB RDMA.
2805  *
2806  * Returns zero upon success or a negative number upon failure.
2807  */
2808 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2809 			      struct srpt_send_ioctx *ioctx)
2810 {
2811 	struct ib_send_wr wr;
2812 	struct ib_send_wr *bad_wr;
2813 	struct rdma_iu *riu;
2814 	int i;
2815 	int ret;
2816 	int sq_wr_avail;
2817 	enum dma_data_direction dir;
2818 	const int n_rdma = ioctx->n_rdma;
2819 
2820 	dir = ioctx->cmd.data_direction;
2821 	if (dir == DMA_TO_DEVICE) {
2822 		/* write */
2823 		ret = -ENOMEM;
2824 		sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2825 		if (sq_wr_avail < 0) {
2826 			printk(KERN_WARNING "IB send queue full (needed %d)\n",
2827 			       n_rdma);
2828 			goto out;
2829 		}
2830 	}
2831 
2832 	ioctx->rdma_aborted = false;
2833 	ret = 0;
2834 	riu = ioctx->rdma_ius;
2835 	memset(&wr, 0, sizeof wr);
2836 
2837 	for (i = 0; i < n_rdma; ++i, ++riu) {
2838 		if (dir == DMA_FROM_DEVICE) {
2839 			wr.opcode = IB_WR_RDMA_WRITE;
2840 			wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2841 						SRPT_RDMA_WRITE_LAST :
2842 						SRPT_RDMA_MID,
2843 						ioctx->ioctx.index);
2844 		} else {
2845 			wr.opcode = IB_WR_RDMA_READ;
2846 			wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2847 						SRPT_RDMA_READ_LAST :
2848 						SRPT_RDMA_MID,
2849 						ioctx->ioctx.index);
2850 		}
2851 		wr.next = NULL;
2852 		wr.wr.rdma.remote_addr = riu->raddr;
2853 		wr.wr.rdma.rkey = riu->rkey;
2854 		wr.num_sge = riu->sge_cnt;
2855 		wr.sg_list = riu->sge;
2856 
2857 		/* only get completion event for the last rdma write */
2858 		if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
2859 			wr.send_flags = IB_SEND_SIGNALED;
2860 
2861 		ret = ib_post_send(ch->qp, &wr, &bad_wr);
2862 		if (ret)
2863 			break;
2864 	}
2865 
2866 	if (ret)
2867 		printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
2868 				 __func__, __LINE__, ret, i, n_rdma);
2869 	if (ret && i > 0) {
2870 		wr.num_sge = 0;
2871 		wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
2872 		wr.send_flags = IB_SEND_SIGNALED;
2873 		while (ch->state == CH_LIVE &&
2874 			ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
2875 			printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
2876 				ioctx->ioctx.index);
2877 			msleep(1000);
2878 		}
2879 		while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
2880 			printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
2881 				ioctx->ioctx.index);
2882 			msleep(1000);
2883 		}
2884 	}
2885 out:
2886 	if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
2887 		atomic_add(n_rdma, &ch->sq_wr_avail);
2888 	return ret;
2889 }
2890 
2891 /**
2892  * srpt_xfer_data() - Start data transfer from initiator to target.
2893  */
2894 static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2895 			  struct srpt_send_ioctx *ioctx)
2896 {
2897 	int ret;
2898 
2899 	ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2900 	if (ret) {
2901 		printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2902 		goto out;
2903 	}
2904 
2905 	ret = srpt_perform_rdmas(ch, ioctx);
2906 	if (ret) {
2907 		if (ret == -EAGAIN || ret == -ENOMEM)
2908 			printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
2909 				   __func__, __LINE__, ret);
2910 		else
2911 			printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
2912 			       __func__, __LINE__, ret);
2913 		goto out_unmap;
2914 	}
2915 
2916 out:
2917 	return ret;
2918 out_unmap:
2919 	srpt_unmap_sg_to_ib_sge(ch, ioctx);
2920 	goto out;
2921 }
2922 
2923 static int srpt_write_pending_status(struct se_cmd *se_cmd)
2924 {
2925 	struct srpt_send_ioctx *ioctx;
2926 
2927 	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2928 	return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2929 }
2930 
2931 /*
2932  * srpt_write_pending() - Start data transfer from initiator to target (write).
2933  */
2934 static int srpt_write_pending(struct se_cmd *se_cmd)
2935 {
2936 	struct srpt_rdma_ch *ch;
2937 	struct srpt_send_ioctx *ioctx;
2938 	enum srpt_command_state new_state;
2939 	enum rdma_ch_state ch_state;
2940 	int ret;
2941 
2942 	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2943 
2944 	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2945 	WARN_ON(new_state == SRPT_STATE_DONE);
2946 
2947 	ch = ioctx->ch;
2948 	BUG_ON(!ch);
2949 
2950 	ch_state = srpt_get_ch_state(ch);
2951 	switch (ch_state) {
2952 	case CH_CONNECTING:
2953 		WARN(true, "unexpected channel state %d\n", ch_state);
2954 		ret = -EINVAL;
2955 		goto out;
2956 	case CH_LIVE:
2957 		break;
2958 	case CH_DISCONNECTING:
2959 	case CH_DRAINING:
2960 	case CH_RELEASING:
2961 		pr_debug("cmd with tag %lld: channel disconnecting\n",
2962 			 ioctx->tag);
2963 		srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2964 		ret = -EINVAL;
2965 		goto out;
2966 	}
2967 	ret = srpt_xfer_data(ch, ioctx);
2968 
2969 out:
2970 	return ret;
2971 }
2972 
2973 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2974 {
2975 	switch (tcm_mgmt_status) {
2976 	case TMR_FUNCTION_COMPLETE:
2977 		return SRP_TSK_MGMT_SUCCESS;
2978 	case TMR_FUNCTION_REJECTED:
2979 		return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2980 	}
2981 	return SRP_TSK_MGMT_FAILED;
2982 }
2983 
2984 /**
2985  * srpt_queue_response() - Transmits the response to a SCSI command.
2986  *
2987  * Callback function called by the TCM core. Must not block since it can be
2988  * invoked on the context of the IB completion handler.
2989  */
2990 static int srpt_queue_response(struct se_cmd *cmd)
2991 {
2992 	struct srpt_rdma_ch *ch;
2993 	struct srpt_send_ioctx *ioctx;
2994 	enum srpt_command_state state;
2995 	unsigned long flags;
2996 	int ret;
2997 	enum dma_data_direction dir;
2998 	int resp_len;
2999 	u8 srp_tm_status;
3000 
3001 	ret = 0;
3002 
3003 	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3004 	ch = ioctx->ch;
3005 	BUG_ON(!ch);
3006 
3007 	spin_lock_irqsave(&ioctx->spinlock, flags);
3008 	state = ioctx->state;
3009 	switch (state) {
3010 	case SRPT_STATE_NEW:
3011 	case SRPT_STATE_DATA_IN:
3012 		ioctx->state = SRPT_STATE_CMD_RSP_SENT;
3013 		break;
3014 	case SRPT_STATE_MGMT:
3015 		ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
3016 		break;
3017 	default:
3018 		WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
3019 			ch, ioctx->ioctx.index, ioctx->state);
3020 		break;
3021 	}
3022 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
3023 
3024 	if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
3025 		     || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
3026 		atomic_inc(&ch->req_lim_delta);
3027 		srpt_abort_cmd(ioctx);
3028 		goto out;
3029 	}
3030 
3031 	dir = ioctx->cmd.data_direction;
3032 
3033 	/* For read commands, transfer the data to the initiator. */
3034 	if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
3035 	    !ioctx->queue_status_only) {
3036 		ret = srpt_xfer_data(ch, ioctx);
3037 		if (ret) {
3038 			printk(KERN_ERR "xfer_data failed for tag %llu\n",
3039 			       ioctx->tag);
3040 			goto out;
3041 		}
3042 	}
3043 
3044 	if (state != SRPT_STATE_MGMT)
3045 		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
3046 					      cmd->scsi_status);
3047 	else {
3048 		srp_tm_status
3049 			= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
3050 		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
3051 						 ioctx->tag);
3052 	}
3053 	ret = srpt_post_send(ch, ioctx, resp_len);
3054 	if (ret) {
3055 		printk(KERN_ERR "sending cmd response failed for tag %llu\n",
3056 		       ioctx->tag);
3057 		srpt_unmap_sg_to_ib_sge(ch, ioctx);
3058 		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
3059 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
3060 	}
3061 
3062 out:
3063 	return ret;
3064 }
3065 
3066 static int srpt_queue_status(struct se_cmd *cmd)
3067 {
3068 	struct srpt_send_ioctx *ioctx;
3069 
3070 	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3071 	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
3072 	if (cmd->se_cmd_flags &
3073 	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
3074 		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
3075 	ioctx->queue_status_only = true;
3076 	return srpt_queue_response(cmd);
3077 }
3078 
3079 static void srpt_refresh_port_work(struct work_struct *work)
3080 {
3081 	struct srpt_port *sport = container_of(work, struct srpt_port, work);
3082 
3083 	srpt_refresh_port(sport);
3084 }
3085 
3086 static int srpt_ch_list_empty(struct srpt_device *sdev)
3087 {
3088 	int res;
3089 
3090 	spin_lock_irq(&sdev->spinlock);
3091 	res = list_empty(&sdev->rch_list);
3092 	spin_unlock_irq(&sdev->spinlock);
3093 
3094 	return res;
3095 }
3096 
3097 /**
3098  * srpt_release_sdev() - Free the channel resources associated with a target.
3099  */
3100 static int srpt_release_sdev(struct srpt_device *sdev)
3101 {
3102 	struct srpt_rdma_ch *ch, *tmp_ch;
3103 	int res;
3104 
3105 	WARN_ON_ONCE(irqs_disabled());
3106 
3107 	BUG_ON(!sdev);
3108 
3109 	spin_lock_irq(&sdev->spinlock);
3110 	list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
3111 		__srpt_close_ch(ch);
3112 	spin_unlock_irq(&sdev->spinlock);
3113 
3114 	res = wait_event_interruptible(sdev->ch_releaseQ,
3115 				       srpt_ch_list_empty(sdev));
3116 	if (res)
3117 		printk(KERN_ERR "%s: interrupted.\n", __func__);
3118 
3119 	return 0;
3120 }
3121 
3122 static struct srpt_port *__srpt_lookup_port(const char *name)
3123 {
3124 	struct ib_device *dev;
3125 	struct srpt_device *sdev;
3126 	struct srpt_port *sport;
3127 	int i;
3128 
3129 	list_for_each_entry(sdev, &srpt_dev_list, list) {
3130 		dev = sdev->device;
3131 		if (!dev)
3132 			continue;
3133 
3134 		for (i = 0; i < dev->phys_port_cnt; i++) {
3135 			sport = &sdev->port[i];
3136 
3137 			if (!strcmp(sport->port_guid, name))
3138 				return sport;
3139 		}
3140 	}
3141 
3142 	return NULL;
3143 }
3144 
3145 static struct srpt_port *srpt_lookup_port(const char *name)
3146 {
3147 	struct srpt_port *sport;
3148 
3149 	spin_lock(&srpt_dev_lock);
3150 	sport = __srpt_lookup_port(name);
3151 	spin_unlock(&srpt_dev_lock);
3152 
3153 	return sport;
3154 }
3155 
3156 /**
3157  * srpt_add_one() - Infiniband device addition callback function.
3158  */
3159 static void srpt_add_one(struct ib_device *device)
3160 {
3161 	struct srpt_device *sdev;
3162 	struct srpt_port *sport;
3163 	struct ib_srq_init_attr srq_attr;
3164 	int i;
3165 
3166 	pr_debug("device = %p, device->dma_ops = %p\n", device,
3167 		 device->dma_ops);
3168 
3169 	sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
3170 	if (!sdev)
3171 		goto err;
3172 
3173 	sdev->device = device;
3174 	INIT_LIST_HEAD(&sdev->rch_list);
3175 	init_waitqueue_head(&sdev->ch_releaseQ);
3176 	spin_lock_init(&sdev->spinlock);
3177 
3178 	if (ib_query_device(device, &sdev->dev_attr))
3179 		goto free_dev;
3180 
3181 	sdev->pd = ib_alloc_pd(device);
3182 	if (IS_ERR(sdev->pd))
3183 		goto free_dev;
3184 
3185 	sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
3186 	if (IS_ERR(sdev->mr))
3187 		goto err_pd;
3188 
3189 	sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
3190 
3191 	srq_attr.event_handler = srpt_srq_event;
3192 	srq_attr.srq_context = (void *)sdev;
3193 	srq_attr.attr.max_wr = sdev->srq_size;
3194 	srq_attr.attr.max_sge = 1;
3195 	srq_attr.attr.srq_limit = 0;
3196 	srq_attr.srq_type = IB_SRQT_BASIC;
3197 
3198 	sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
3199 	if (IS_ERR(sdev->srq))
3200 		goto err_mr;
3201 
3202 	pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
3203 		 __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
3204 		 device->name);
3205 
3206 	if (!srpt_service_guid)
3207 		srpt_service_guid = be64_to_cpu(device->node_guid);
3208 
3209 	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3210 	if (IS_ERR(sdev->cm_id))
3211 		goto err_srq;
3212 
3213 	/* print out target login information */
3214 	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
3215 		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
3216 		 srpt_service_guid, srpt_service_guid);
3217 
3218 	/*
3219 	 * We do not have a consistent service_id (ie. also id_ext of target_id)
3220 	 * to identify this target. We currently use the guid of the first HCA
3221 	 * in the system as service_id; therefore, the target_id will change
3222 	 * if this HCA is gone bad and replaced by different HCA
3223 	 */
3224 	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
3225 		goto err_cm;
3226 
3227 	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3228 			      srpt_event_handler);
3229 	if (ib_register_event_handler(&sdev->event_handler))
3230 		goto err_cm;
3231 
3232 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3233 		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3234 				      sizeof(*sdev->ioctx_ring[0]),
3235 				      srp_max_req_size, DMA_FROM_DEVICE);
3236 	if (!sdev->ioctx_ring)
3237 		goto err_event;
3238 
3239 	for (i = 0; i < sdev->srq_size; ++i)
3240 		srpt_post_recv(sdev, sdev->ioctx_ring[i]);
3241 
3242 	WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
3243 
3244 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3245 		sport = &sdev->port[i - 1];
3246 		sport->sdev = sdev;
3247 		sport->port = i;
3248 		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3249 		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3250 		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3251 		INIT_WORK(&sport->work, srpt_refresh_port_work);
3252 		INIT_LIST_HEAD(&sport->port_acl_list);
3253 		spin_lock_init(&sport->port_acl_lock);
3254 
3255 		if (srpt_refresh_port(sport)) {
3256 			printk(KERN_ERR "MAD registration failed for %s-%d.\n",
3257 			       srpt_sdev_name(sdev), i);
3258 			goto err_ring;
3259 		}
3260 		snprintf(sport->port_guid, sizeof(sport->port_guid),
3261 			"0x%016llx%016llx",
3262 			be64_to_cpu(sport->gid.global.subnet_prefix),
3263 			be64_to_cpu(sport->gid.global.interface_id));
3264 	}
3265 
3266 	spin_lock(&srpt_dev_lock);
3267 	list_add_tail(&sdev->list, &srpt_dev_list);
3268 	spin_unlock(&srpt_dev_lock);
3269 
3270 out:
3271 	ib_set_client_data(device, &srpt_client, sdev);
3272 	pr_debug("added %s.\n", device->name);
3273 	return;
3274 
3275 err_ring:
3276 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3277 			     sdev->srq_size, srp_max_req_size,
3278 			     DMA_FROM_DEVICE);
3279 err_event:
3280 	ib_unregister_event_handler(&sdev->event_handler);
3281 err_cm:
3282 	ib_destroy_cm_id(sdev->cm_id);
3283 err_srq:
3284 	ib_destroy_srq(sdev->srq);
3285 err_mr:
3286 	ib_dereg_mr(sdev->mr);
3287 err_pd:
3288 	ib_dealloc_pd(sdev->pd);
3289 free_dev:
3290 	kfree(sdev);
3291 err:
3292 	sdev = NULL;
3293 	printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
3294 	goto out;
3295 }
3296 
3297 /**
3298  * srpt_remove_one() - InfiniBand device removal callback function.
3299  */
3300 static void srpt_remove_one(struct ib_device *device)
3301 {
3302 	struct srpt_device *sdev;
3303 	int i;
3304 
3305 	sdev = ib_get_client_data(device, &srpt_client);
3306 	if (!sdev) {
3307 		printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
3308 		       device->name);
3309 		return;
3310 	}
3311 
3312 	srpt_unregister_mad_agent(sdev);
3313 
3314 	ib_unregister_event_handler(&sdev->event_handler);
3315 
3316 	/* Cancel any work queued by the just unregistered IB event handler. */
3317 	for (i = 0; i < sdev->device->phys_port_cnt; i++)
3318 		cancel_work_sync(&sdev->port[i].work);
3319 
3320 	ib_destroy_cm_id(sdev->cm_id);
3321 
3322 	/*
3323 	 * Unregistering a target must happen after destroying sdev->cm_id
3324 	 * such that no new SRP_LOGIN_REQ information units can arrive while
3325 	 * destroying the target.
3326 	 */
3327 	spin_lock(&srpt_dev_lock);
3328 	list_del(&sdev->list);
3329 	spin_unlock(&srpt_dev_lock);
3330 	srpt_release_sdev(sdev);
3331 
3332 	ib_destroy_srq(sdev->srq);
3333 	ib_dereg_mr(sdev->mr);
3334 	ib_dealloc_pd(sdev->pd);
3335 
3336 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3337 			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
3338 	sdev->ioctx_ring = NULL;
3339 	kfree(sdev);
3340 }
3341 
3342 static struct ib_client srpt_client = {
3343 	.name = DRV_NAME,
3344 	.add = srpt_add_one,
3345 	.remove = srpt_remove_one
3346 };
3347 
3348 static int srpt_check_true(struct se_portal_group *se_tpg)
3349 {
3350 	return 1;
3351 }
3352 
3353 static int srpt_check_false(struct se_portal_group *se_tpg)
3354 {
3355 	return 0;
3356 }
3357 
3358 static char *srpt_get_fabric_name(void)
3359 {
3360 	return "srpt";
3361 }
3362 
3363 static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
3364 {
3365 	return SCSI_TRANSPORTID_PROTOCOLID_SRP;
3366 }
3367 
3368 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3369 {
3370 	struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3371 
3372 	return sport->port_guid;
3373 }
3374 
3375 static u16 srpt_get_tag(struct se_portal_group *tpg)
3376 {
3377 	return 1;
3378 }
3379 
3380 static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
3381 {
3382 	return 1;
3383 }
3384 
3385 static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
3386 				    struct se_node_acl *se_nacl,
3387 				    struct t10_pr_registration *pr_reg,
3388 				    int *format_code, unsigned char *buf)
3389 {
3390 	struct srpt_node_acl *nacl;
3391 	struct spc_rdma_transport_id *tr_id;
3392 
3393 	nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3394 	tr_id = (void *)buf;
3395 	tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
3396 	memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
3397 	return sizeof(*tr_id);
3398 }
3399 
3400 static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
3401 					struct se_node_acl *se_nacl,
3402 					struct t10_pr_registration *pr_reg,
3403 					int *format_code)
3404 {
3405 	*format_code = 0;
3406 	return sizeof(struct spc_rdma_transport_id);
3407 }
3408 
3409 static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
3410 					    const char *buf, u32 *out_tid_len,
3411 					    char **port_nexus_ptr)
3412 {
3413 	struct spc_rdma_transport_id *tr_id;
3414 
3415 	*port_nexus_ptr = NULL;
3416 	*out_tid_len = sizeof(struct spc_rdma_transport_id);
3417 	tr_id = (void *)buf;
3418 	return (char *)tr_id->i_port_id;
3419 }
3420 
3421 static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
3422 {
3423 	struct srpt_node_acl *nacl;
3424 
3425 	nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
3426 	if (!nacl) {
3427 		printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
3428 		return NULL;
3429 	}
3430 
3431 	return &nacl->nacl;
3432 }
3433 
3434 static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
3435 				    struct se_node_acl *se_nacl)
3436 {
3437 	struct srpt_node_acl *nacl;
3438 
3439 	nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3440 	kfree(nacl);
3441 }
3442 
3443 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3444 {
3445 	return 1;
3446 }
3447 
3448 static void srpt_release_cmd(struct se_cmd *se_cmd)
3449 {
3450 	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3451 				struct srpt_send_ioctx, cmd);
3452 	struct srpt_rdma_ch *ch = ioctx->ch;
3453 	unsigned long flags;
3454 
3455 	WARN_ON(ioctx->state != SRPT_STATE_DONE);
3456 	WARN_ON(ioctx->mapped_sg_count != 0);
3457 
3458 	if (ioctx->n_rbuf > 1) {
3459 		kfree(ioctx->rbufs);
3460 		ioctx->rbufs = NULL;
3461 		ioctx->n_rbuf = 0;
3462 	}
3463 
3464 	spin_lock_irqsave(&ch->spinlock, flags);
3465 	list_add(&ioctx->free_list, &ch->free_list);
3466 	spin_unlock_irqrestore(&ch->spinlock, flags);
3467 }
3468 
3469 /**
3470  * srpt_shutdown_session() - Whether or not a session may be shut down.
3471  */
3472 static int srpt_shutdown_session(struct se_session *se_sess)
3473 {
3474 	return true;
3475 }
3476 
3477 /**
3478  * srpt_close_session() - Forcibly close a session.
3479  *
3480  * Callback function invoked by the TCM core to clean up sessions associated
3481  * with a node ACL when the user invokes
3482  * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3483  */
3484 static void srpt_close_session(struct se_session *se_sess)
3485 {
3486 	DECLARE_COMPLETION_ONSTACK(release_done);
3487 	struct srpt_rdma_ch *ch;
3488 	struct srpt_device *sdev;
3489 	int res;
3490 
3491 	ch = se_sess->fabric_sess_ptr;
3492 	WARN_ON(ch->sess != se_sess);
3493 
3494 	pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
3495 
3496 	sdev = ch->sport->sdev;
3497 	spin_lock_irq(&sdev->spinlock);
3498 	BUG_ON(ch->release_done);
3499 	ch->release_done = &release_done;
3500 	__srpt_close_ch(ch);
3501 	spin_unlock_irq(&sdev->spinlock);
3502 
3503 	res = wait_for_completion_timeout(&release_done, 60 * HZ);
3504 	WARN_ON(res <= 0);
3505 }
3506 
3507 /**
3508  * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
3509  *
3510  * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3511  * This object represents an arbitrary integer used to uniquely identify a
3512  * particular attached remote initiator port to a particular SCSI target port
3513  * within a particular SCSI target device within a particular SCSI instance.
3514  */
3515 static u32 srpt_sess_get_index(struct se_session *se_sess)
3516 {
3517 	return 0;
3518 }
3519 
3520 static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3521 {
3522 }
3523 
3524 static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
3525 {
3526 	struct srpt_send_ioctx *ioctx;
3527 
3528 	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3529 	return ioctx->tag;
3530 }
3531 
3532 /* Note: only used from inside debug printk's by the TCM core. */
3533 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3534 {
3535 	struct srpt_send_ioctx *ioctx;
3536 
3537 	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3538 	return srpt_get_cmd_state(ioctx);
3539 }
3540 
3541 /**
3542  * srpt_parse_i_port_id() - Parse an initiator port ID.
3543  * @name: ASCII representation of a 128-bit initiator port ID.
3544  * @i_port_id: Binary 128-bit port ID.
3545  */
3546 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3547 {
3548 	const char *p;
3549 	unsigned len, count, leading_zero_bytes;
3550 	int ret, rc;
3551 
3552 	p = name;
3553 	if (strnicmp(p, "0x", 2) == 0)
3554 		p += 2;
3555 	ret = -EINVAL;
3556 	len = strlen(p);
3557 	if (len % 2)
3558 		goto out;
3559 	count = min(len / 2, 16U);
3560 	leading_zero_bytes = 16 - count;
3561 	memset(i_port_id, 0, leading_zero_bytes);
3562 	rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
3563 	if (rc < 0)
3564 		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
3565 	ret = 0;
3566 out:
3567 	return ret;
3568 }
3569 
3570 /*
3571  * configfs callback function invoked for
3572  * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3573  */
3574 static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
3575 					     struct config_group *group,
3576 					     const char *name)
3577 {
3578 	struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3579 	struct se_node_acl *se_nacl, *se_nacl_new;
3580 	struct srpt_node_acl *nacl;
3581 	int ret = 0;
3582 	u32 nexus_depth = 1;
3583 	u8 i_port_id[16];
3584 
3585 	if (srpt_parse_i_port_id(i_port_id, name) < 0) {
3586 		printk(KERN_ERR "invalid initiator port ID %s\n", name);
3587 		ret = -EINVAL;
3588 		goto err;
3589 	}
3590 
3591 	se_nacl_new = srpt_alloc_fabric_acl(tpg);
3592 	if (!se_nacl_new) {
3593 		ret = -ENOMEM;
3594 		goto err;
3595 	}
3596 	/*
3597 	 * nacl_new may be released by core_tpg_add_initiator_node_acl()
3598 	 * when converting a node ACL from demo mode to explict
3599 	 */
3600 	se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
3601 						  nexus_depth);
3602 	if (IS_ERR(se_nacl)) {
3603 		ret = PTR_ERR(se_nacl);
3604 		goto err;
3605 	}
3606 	/* Locate our struct srpt_node_acl and set sdev and i_port_id. */
3607 	nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3608 	memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
3609 	nacl->sport = sport;
3610 
3611 	spin_lock_irq(&sport->port_acl_lock);
3612 	list_add_tail(&nacl->list, &sport->port_acl_list);
3613 	spin_unlock_irq(&sport->port_acl_lock);
3614 
3615 	return se_nacl;
3616 err:
3617 	return ERR_PTR(ret);
3618 }
3619 
3620 /*
3621  * configfs callback function invoked for
3622  * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3623  */
3624 static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
3625 {
3626 	struct srpt_node_acl *nacl;
3627 	struct srpt_device *sdev;
3628 	struct srpt_port *sport;
3629 
3630 	nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3631 	sport = nacl->sport;
3632 	sdev = sport->sdev;
3633 	spin_lock_irq(&sport->port_acl_lock);
3634 	list_del(&nacl->list);
3635 	spin_unlock_irq(&sport->port_acl_lock);
3636 	core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
3637 	srpt_release_fabric_acl(NULL, se_nacl);
3638 }
3639 
3640 static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
3641 	struct se_portal_group *se_tpg,
3642 	char *page)
3643 {
3644 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3645 
3646 	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3647 }
3648 
3649 static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
3650 	struct se_portal_group *se_tpg,
3651 	const char *page,
3652 	size_t count)
3653 {
3654 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3655 	unsigned long val;
3656 	int ret;
3657 
3658 	ret = strict_strtoul(page, 0, &val);
3659 	if (ret < 0) {
3660 		pr_err("strict_strtoul() failed with ret: %d\n", ret);
3661 		return -EINVAL;
3662 	}
3663 	if (val > MAX_SRPT_RDMA_SIZE) {
3664 		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3665 			MAX_SRPT_RDMA_SIZE);
3666 		return -EINVAL;
3667 	}
3668 	if (val < DEFAULT_MAX_RDMA_SIZE) {
3669 		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3670 			val, DEFAULT_MAX_RDMA_SIZE);
3671 		return -EINVAL;
3672 	}
3673 	sport->port_attrib.srp_max_rdma_size = val;
3674 
3675 	return count;
3676 }
3677 
3678 TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
3679 
3680 static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
3681 	struct se_portal_group *se_tpg,
3682 	char *page)
3683 {
3684 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3685 
3686 	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3687 }
3688 
3689 static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
3690 	struct se_portal_group *se_tpg,
3691 	const char *page,
3692 	size_t count)
3693 {
3694 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3695 	unsigned long val;
3696 	int ret;
3697 
3698 	ret = strict_strtoul(page, 0, &val);
3699 	if (ret < 0) {
3700 		pr_err("strict_strtoul() failed with ret: %d\n", ret);
3701 		return -EINVAL;
3702 	}
3703 	if (val > MAX_SRPT_RSP_SIZE) {
3704 		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3705 			MAX_SRPT_RSP_SIZE);
3706 		return -EINVAL;
3707 	}
3708 	if (val < MIN_MAX_RSP_SIZE) {
3709 		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3710 			MIN_MAX_RSP_SIZE);
3711 		return -EINVAL;
3712 	}
3713 	sport->port_attrib.srp_max_rsp_size = val;
3714 
3715 	return count;
3716 }
3717 
3718 TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
3719 
3720 static ssize_t srpt_tpg_attrib_show_srp_sq_size(
3721 	struct se_portal_group *se_tpg,
3722 	char *page)
3723 {
3724 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3725 
3726 	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3727 }
3728 
3729 static ssize_t srpt_tpg_attrib_store_srp_sq_size(
3730 	struct se_portal_group *se_tpg,
3731 	const char *page,
3732 	size_t count)
3733 {
3734 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3735 	unsigned long val;
3736 	int ret;
3737 
3738 	ret = strict_strtoul(page, 0, &val);
3739 	if (ret < 0) {
3740 		pr_err("strict_strtoul() failed with ret: %d\n", ret);
3741 		return -EINVAL;
3742 	}
3743 	if (val > MAX_SRPT_SRQ_SIZE) {
3744 		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3745 			MAX_SRPT_SRQ_SIZE);
3746 		return -EINVAL;
3747 	}
3748 	if (val < MIN_SRPT_SRQ_SIZE) {
3749 		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3750 			MIN_SRPT_SRQ_SIZE);
3751 		return -EINVAL;
3752 	}
3753 	sport->port_attrib.srp_sq_size = val;
3754 
3755 	return count;
3756 }
3757 
3758 TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
3759 
3760 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3761 	&srpt_tpg_attrib_srp_max_rdma_size.attr,
3762 	&srpt_tpg_attrib_srp_max_rsp_size.attr,
3763 	&srpt_tpg_attrib_srp_sq_size.attr,
3764 	NULL,
3765 };
3766 
3767 static ssize_t srpt_tpg_show_enable(
3768 	struct se_portal_group *se_tpg,
3769 	char *page)
3770 {
3771 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3772 
3773 	return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0);
3774 }
3775 
3776 static ssize_t srpt_tpg_store_enable(
3777 	struct se_portal_group *se_tpg,
3778 	const char *page,
3779 	size_t count)
3780 {
3781 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3782 	unsigned long tmp;
3783         int ret;
3784 
3785 	ret = strict_strtoul(page, 0, &tmp);
3786 	if (ret < 0) {
3787 		printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
3788 		return -EINVAL;
3789 	}
3790 
3791 	if ((tmp != 0) && (tmp != 1)) {
3792 		printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3793 		return -EINVAL;
3794 	}
3795 	if (tmp == 1)
3796 		sport->enabled = true;
3797 	else
3798 		sport->enabled = false;
3799 
3800 	return count;
3801 }
3802 
3803 TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
3804 
3805 static struct configfs_attribute *srpt_tpg_attrs[] = {
3806 	&srpt_tpg_enable.attr,
3807 	NULL,
3808 };
3809 
3810 /**
3811  * configfs callback invoked for
3812  * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3813  */
3814 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3815 					     struct config_group *group,
3816 					     const char *name)
3817 {
3818 	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3819 	int res;
3820 
3821 	/* Initialize sport->port_wwn and sport->port_tpg_1 */
3822 	res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
3823 			&sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
3824 	if (res)
3825 		return ERR_PTR(res);
3826 
3827 	return &sport->port_tpg_1;
3828 }
3829 
3830 /**
3831  * configfs callback invoked for
3832  * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3833  */
3834 static void srpt_drop_tpg(struct se_portal_group *tpg)
3835 {
3836 	struct srpt_port *sport = container_of(tpg,
3837 				struct srpt_port, port_tpg_1);
3838 
3839 	sport->enabled = false;
3840 	core_tpg_deregister(&sport->port_tpg_1);
3841 }
3842 
3843 /**
3844  * configfs callback invoked for
3845  * mkdir /sys/kernel/config/target/$driver/$port
3846  */
3847 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3848 				      struct config_group *group,
3849 				      const char *name)
3850 {
3851 	struct srpt_port *sport;
3852 	int ret;
3853 
3854 	sport = srpt_lookup_port(name);
3855 	pr_debug("make_tport(%s)\n", name);
3856 	ret = -EINVAL;
3857 	if (!sport)
3858 		goto err;
3859 
3860 	return &sport->port_wwn;
3861 
3862 err:
3863 	return ERR_PTR(ret);
3864 }
3865 
3866 /**
3867  * configfs callback invoked for
3868  * rmdir /sys/kernel/config/target/$driver/$port
3869  */
3870 static void srpt_drop_tport(struct se_wwn *wwn)
3871 {
3872 	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3873 
3874 	pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
3875 }
3876 
3877 static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
3878 					      char *buf)
3879 {
3880 	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3881 }
3882 
3883 TF_WWN_ATTR_RO(srpt, version);
3884 
3885 static struct configfs_attribute *srpt_wwn_attrs[] = {
3886 	&srpt_wwn_version.attr,
3887 	NULL,
3888 };
3889 
3890 static struct target_core_fabric_ops srpt_template = {
3891 	.get_fabric_name		= srpt_get_fabric_name,
3892 	.get_fabric_proto_ident		= srpt_get_fabric_proto_ident,
3893 	.tpg_get_wwn			= srpt_get_fabric_wwn,
3894 	.tpg_get_tag			= srpt_get_tag,
3895 	.tpg_get_default_depth		= srpt_get_default_depth,
3896 	.tpg_get_pr_transport_id	= srpt_get_pr_transport_id,
3897 	.tpg_get_pr_transport_id_len	= srpt_get_pr_transport_id_len,
3898 	.tpg_parse_pr_out_transport_id	= srpt_parse_pr_out_transport_id,
3899 	.tpg_check_demo_mode		= srpt_check_false,
3900 	.tpg_check_demo_mode_cache	= srpt_check_true,
3901 	.tpg_check_demo_mode_write_protect = srpt_check_true,
3902 	.tpg_check_prod_mode_write_protect = srpt_check_false,
3903 	.tpg_alloc_fabric_acl		= srpt_alloc_fabric_acl,
3904 	.tpg_release_fabric_acl		= srpt_release_fabric_acl,
3905 	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
3906 	.release_cmd			= srpt_release_cmd,
3907 	.check_stop_free		= srpt_check_stop_free,
3908 	.shutdown_session		= srpt_shutdown_session,
3909 	.close_session			= srpt_close_session,
3910 	.sess_get_index			= srpt_sess_get_index,
3911 	.sess_get_initiator_sid		= NULL,
3912 	.write_pending			= srpt_write_pending,
3913 	.write_pending_status		= srpt_write_pending_status,
3914 	.set_default_node_attributes	= srpt_set_default_node_attrs,
3915 	.get_task_tag			= srpt_get_task_tag,
3916 	.get_cmd_state			= srpt_get_tcm_cmd_state,
3917 	.queue_data_in			= srpt_queue_response,
3918 	.queue_status			= srpt_queue_status,
3919 	.queue_tm_rsp			= srpt_queue_response,
3920 	/*
3921 	 * Setup function pointers for generic logic in
3922 	 * target_core_fabric_configfs.c
3923 	 */
3924 	.fabric_make_wwn		= srpt_make_tport,
3925 	.fabric_drop_wwn		= srpt_drop_tport,
3926 	.fabric_make_tpg		= srpt_make_tpg,
3927 	.fabric_drop_tpg		= srpt_drop_tpg,
3928 	.fabric_post_link		= NULL,
3929 	.fabric_pre_unlink		= NULL,
3930 	.fabric_make_np			= NULL,
3931 	.fabric_drop_np			= NULL,
3932 	.fabric_make_nodeacl		= srpt_make_nodeacl,
3933 	.fabric_drop_nodeacl		= srpt_drop_nodeacl,
3934 };
3935 
3936 /**
3937  * srpt_init_module() - Kernel module initialization.
3938  *
3939  * Note: Since ib_register_client() registers callback functions, and since at
3940  * least one of these callback functions (srpt_add_one()) calls target core
3941  * functions, this driver must be registered with the target core before
3942  * ib_register_client() is called.
3943  */
3944 static int __init srpt_init_module(void)
3945 {
3946 	int ret;
3947 
3948 	ret = -EINVAL;
3949 	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3950 		printk(KERN_ERR "invalid value %d for kernel module parameter"
3951 		       " srp_max_req_size -- must be at least %d.\n",
3952 		       srp_max_req_size, MIN_MAX_REQ_SIZE);
3953 		goto out;
3954 	}
3955 
3956 	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
3957 	    || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
3958 		printk(KERN_ERR "invalid value %d for kernel module parameter"
3959 		       " srpt_srq_size -- must be in the range [%d..%d].\n",
3960 		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3961 		goto out;
3962 	}
3963 
3964 	srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
3965 	if (IS_ERR(srpt_target)) {
3966 		printk(KERN_ERR "couldn't register\n");
3967 		ret = PTR_ERR(srpt_target);
3968 		goto out;
3969 	}
3970 
3971 	srpt_target->tf_ops = srpt_template;
3972 
3973 	/*
3974 	 * Set up default attribute lists.
3975 	 */
3976 	srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
3977 	srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
3978 	srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
3979 	srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
3980 	srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
3981 	srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
3982 	srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
3983 	srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
3984 	srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
3985 
3986 	ret = target_fabric_configfs_register(srpt_target);
3987 	if (ret < 0) {
3988 		printk(KERN_ERR "couldn't register\n");
3989 		goto out_free_target;
3990 	}
3991 
3992 	ret = ib_register_client(&srpt_client);
3993 	if (ret) {
3994 		printk(KERN_ERR "couldn't register IB client\n");
3995 		goto out_unregister_target;
3996 	}
3997 
3998 	return 0;
3999 
4000 out_unregister_target:
4001 	target_fabric_configfs_deregister(srpt_target);
4002 	srpt_target = NULL;
4003 out_free_target:
4004 	if (srpt_target)
4005 		target_fabric_configfs_free(srpt_target);
4006 out:
4007 	return ret;
4008 }
4009 
4010 static void __exit srpt_cleanup_module(void)
4011 {
4012 	ib_unregister_client(&srpt_client);
4013 	target_fabric_configfs_deregister(srpt_target);
4014 	srpt_target = NULL;
4015 }
4016 
4017 module_init(srpt_init_module);
4018 module_exit(srpt_cleanup_module);
4019