1 /*
2  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <linux/lockdep.h>
44 #include <linux/inet.h>
45 #include <rdma/ib_cache.h>
46 
47 #include <linux/atomic.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_dbg.h>
52 #include <scsi/scsi_tcq.h>
53 #include <scsi/srp.h>
54 #include <scsi/scsi_transport_srp.h>
55 
56 #include "ib_srp.h"
57 
58 #define DRV_NAME	"ib_srp"
59 #define PFX		DRV_NAME ": "
60 
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
64 
65 #if !defined(CONFIG_DYNAMIC_DEBUG)
66 #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
67 #define DYNAMIC_DEBUG_BRANCH(descriptor) false
68 #endif
69 
70 static unsigned int srp_sg_tablesize;
71 static unsigned int cmd_sg_entries;
72 static unsigned int indirect_sg_entries;
73 static bool allow_ext_sg;
74 static bool register_always = true;
75 static bool never_register;
76 static int topspin_workarounds = 1;
77 
78 module_param(srp_sg_tablesize, uint, 0444);
79 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
80 
81 module_param(cmd_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(cmd_sg_entries,
83 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
84 
85 module_param(indirect_sg_entries, uint, 0444);
86 MODULE_PARM_DESC(indirect_sg_entries,
87 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
88 
89 module_param(allow_ext_sg, bool, 0444);
90 MODULE_PARM_DESC(allow_ext_sg,
91 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
92 
93 module_param(topspin_workarounds, int, 0444);
94 MODULE_PARM_DESC(topspin_workarounds,
95 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
96 
97 module_param(register_always, bool, 0444);
98 MODULE_PARM_DESC(register_always,
99 		 "Use memory registration even for contiguous memory regions");
100 
101 module_param(never_register, bool, 0444);
102 MODULE_PARM_DESC(never_register, "Never register memory");
103 
104 static const struct kernel_param_ops srp_tmo_ops;
105 
106 static int srp_reconnect_delay = 10;
107 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
108 		S_IRUGO | S_IWUSR);
109 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
110 
111 static int srp_fast_io_fail_tmo = 15;
112 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
113 		S_IRUGO | S_IWUSR);
114 MODULE_PARM_DESC(fast_io_fail_tmo,
115 		 "Number of seconds between the observation of a transport"
116 		 " layer error and failing all I/O. \"off\" means that this"
117 		 " functionality is disabled.");
118 
119 static int srp_dev_loss_tmo = 600;
120 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
121 		S_IRUGO | S_IWUSR);
122 MODULE_PARM_DESC(dev_loss_tmo,
123 		 "Maximum number of seconds that the SRP transport should"
124 		 " insulate transport layer errors. After this time has been"
125 		 " exceeded the SCSI host is removed. Should be"
126 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
127 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
128 		 " this functionality is disabled.");
129 
130 static bool srp_use_imm_data = true;
131 module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
132 MODULE_PARM_DESC(use_imm_data,
133 		 "Whether or not to request permission to use immediate data during SRP login.");
134 
135 static unsigned int srp_max_imm_data = 8 * 1024;
136 module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
137 MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
138 
139 static unsigned ch_count;
140 module_param(ch_count, uint, 0444);
141 MODULE_PARM_DESC(ch_count,
142 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
143 
144 static int srp_add_one(struct ib_device *device);
145 static void srp_remove_one(struct ib_device *device, void *client_data);
146 static void srp_rename_dev(struct ib_device *device, void *client_data);
147 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
148 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
149 		const char *opname);
150 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
151 			     const struct ib_cm_event *event);
152 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
153 			       struct rdma_cm_event *event);
154 
155 static struct scsi_transport_template *ib_srp_transport_template;
156 static struct workqueue_struct *srp_remove_wq;
157 
158 static struct ib_client srp_client = {
159 	.name   = "srp",
160 	.add    = srp_add_one,
161 	.remove = srp_remove_one,
162 	.rename = srp_rename_dev
163 };
164 
165 static struct ib_sa_client srp_sa_client;
166 
167 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
168 {
169 	int tmo = *(int *)kp->arg;
170 
171 	if (tmo >= 0)
172 		return sysfs_emit(buffer, "%d\n", tmo);
173 	else
174 		return sysfs_emit(buffer, "off\n");
175 }
176 
177 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
178 {
179 	int tmo, res;
180 
181 	res = srp_parse_tmo(&tmo, val);
182 	if (res)
183 		goto out;
184 
185 	if (kp->arg == &srp_reconnect_delay)
186 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
187 				    srp_dev_loss_tmo);
188 	else if (kp->arg == &srp_fast_io_fail_tmo)
189 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
190 	else
191 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
192 				    tmo);
193 	if (res)
194 		goto out;
195 	*(int *)kp->arg = tmo;
196 
197 out:
198 	return res;
199 }
200 
201 static const struct kernel_param_ops srp_tmo_ops = {
202 	.get = srp_tmo_get,
203 	.set = srp_tmo_set,
204 };
205 
206 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
207 {
208 	return (struct srp_target_port *) host->hostdata;
209 }
210 
211 static const char *srp_target_info(struct Scsi_Host *host)
212 {
213 	return host_to_target(host)->target_name;
214 }
215 
216 static int srp_target_is_topspin(struct srp_target_port *target)
217 {
218 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
219 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
220 
221 	return topspin_workarounds &&
222 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
223 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
224 }
225 
226 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
227 				   gfp_t gfp_mask,
228 				   enum dma_data_direction direction)
229 {
230 	struct srp_iu *iu;
231 
232 	iu = kmalloc(sizeof *iu, gfp_mask);
233 	if (!iu)
234 		goto out;
235 
236 	iu->buf = kzalloc(size, gfp_mask);
237 	if (!iu->buf)
238 		goto out_free_iu;
239 
240 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
241 				    direction);
242 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
243 		goto out_free_buf;
244 
245 	iu->size      = size;
246 	iu->direction = direction;
247 
248 	return iu;
249 
250 out_free_buf:
251 	kfree(iu->buf);
252 out_free_iu:
253 	kfree(iu);
254 out:
255 	return NULL;
256 }
257 
258 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
259 {
260 	if (!iu)
261 		return;
262 
263 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
264 			    iu->direction);
265 	kfree(iu->buf);
266 	kfree(iu);
267 }
268 
269 static void srp_qp_event(struct ib_event *event, void *context)
270 {
271 	pr_debug("QP event %s (%d)\n",
272 		 ib_event_msg(event->event), event->event);
273 }
274 
275 static int srp_init_ib_qp(struct srp_target_port *target,
276 			  struct ib_qp *qp)
277 {
278 	struct ib_qp_attr *attr;
279 	int ret;
280 
281 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
282 	if (!attr)
283 		return -ENOMEM;
284 
285 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
286 				  target->srp_host->port,
287 				  be16_to_cpu(target->ib_cm.pkey),
288 				  &attr->pkey_index);
289 	if (ret)
290 		goto out;
291 
292 	attr->qp_state        = IB_QPS_INIT;
293 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
294 				    IB_ACCESS_REMOTE_WRITE);
295 	attr->port_num        = target->srp_host->port;
296 
297 	ret = ib_modify_qp(qp, attr,
298 			   IB_QP_STATE		|
299 			   IB_QP_PKEY_INDEX	|
300 			   IB_QP_ACCESS_FLAGS	|
301 			   IB_QP_PORT);
302 
303 out:
304 	kfree(attr);
305 	return ret;
306 }
307 
308 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
309 {
310 	struct srp_target_port *target = ch->target;
311 	struct ib_cm_id *new_cm_id;
312 
313 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
314 				    srp_ib_cm_handler, ch);
315 	if (IS_ERR(new_cm_id))
316 		return PTR_ERR(new_cm_id);
317 
318 	if (ch->ib_cm.cm_id)
319 		ib_destroy_cm_id(ch->ib_cm.cm_id);
320 	ch->ib_cm.cm_id = new_cm_id;
321 	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
322 			    target->srp_host->port))
323 		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
324 	else
325 		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
326 	ch->ib_cm.path.sgid = target->sgid;
327 	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
328 	ch->ib_cm.path.pkey = target->ib_cm.pkey;
329 	ch->ib_cm.path.service_id = target->ib_cm.service_id;
330 
331 	return 0;
332 }
333 
334 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
335 {
336 	struct srp_target_port *target = ch->target;
337 	struct rdma_cm_id *new_cm_id;
338 	int ret;
339 
340 	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
341 				   RDMA_PS_TCP, IB_QPT_RC);
342 	if (IS_ERR(new_cm_id)) {
343 		ret = PTR_ERR(new_cm_id);
344 		new_cm_id = NULL;
345 		goto out;
346 	}
347 
348 	init_completion(&ch->done);
349 	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
350 				&target->rdma_cm.src.sa : NULL,
351 				&target->rdma_cm.dst.sa,
352 				SRP_PATH_REC_TIMEOUT_MS);
353 	if (ret) {
354 		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
355 		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
356 		goto out;
357 	}
358 	ret = wait_for_completion_interruptible(&ch->done);
359 	if (ret < 0)
360 		goto out;
361 
362 	ret = ch->status;
363 	if (ret) {
364 		pr_err("Resolving address %pISpsc failed (%d)\n",
365 		       &target->rdma_cm.dst, ret);
366 		goto out;
367 	}
368 
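	/*
	 * Swap in the new CM ID; the previous one, if any, ends up in
	 * new_cm_id and is destroyed below.
	 */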
369 	swap(ch->rdma_cm.cm_id, new_cm_id);
370 
371 out:
372 	if (new_cm_id)
373 		rdma_destroy_id(new_cm_id);
374 
375 	return ret;
376 }
377 
378 static int srp_new_cm_id(struct srp_rdma_ch *ch)
379 {
380 	struct srp_target_port *target = ch->target;
381 
382 	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
383 		srp_new_ib_cm_id(ch);
384 }
385 
386 /**
387  * srp_destroy_fr_pool() - free the resources owned by a pool
388  * @pool: Fast registration pool to be destroyed.
389  */
390 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
391 {
392 	int i;
393 	struct srp_fr_desc *d;
394 
395 	if (!pool)
396 		return;
397 
398 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
399 		if (d->mr)
400 			ib_dereg_mr(d->mr);
401 	}
402 	kfree(pool);
403 }
404 
405 /**
406  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
407  * @device:            IB device to allocate fast registration descriptors for.
408  * @pd:                Protection domain associated with the FR descriptors.
409  * @pool_size:         Number of descriptors to allocate.
410  * @max_page_list_len: Maximum fast registration work request page list length.
411  */
412 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
413 					      struct ib_pd *pd, int pool_size,
414 					      int max_page_list_len)
415 {
416 	struct srp_fr_pool *pool;
417 	struct srp_fr_desc *d;
418 	struct ib_mr *mr;
419 	int i, ret = -EINVAL;
420 	enum ib_mr_type mr_type;
421 
422 	if (pool_size <= 0)
423 		goto err;
424 	ret = -ENOMEM;
425 	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
426 	if (!pool)
427 		goto err;
428 	pool->size = pool_size;
429 	pool->max_page_list_len = max_page_list_len;
430 	spin_lock_init(&pool->lock);
431 	INIT_LIST_HEAD(&pool->free_list);
432 
433 	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
434 		mr_type = IB_MR_TYPE_SG_GAPS;
435 	else
436 		mr_type = IB_MR_TYPE_MEM_REG;
437 
438 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
439 		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
440 		if (IS_ERR(mr)) {
441 			ret = PTR_ERR(mr);
442 			if (ret == -ENOMEM)
443 				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
444 					dev_name(&device->dev));
445 			goto destroy_pool;
446 		}
447 		d->mr = mr;
448 		list_add_tail(&d->entry, &pool->free_list);
449 	}
450 
451 out:
452 	return pool;
453 
454 destroy_pool:
455 	srp_destroy_fr_pool(pool);
456 
457 err:
458 	pool = ERR_PTR(ret);
459 	goto out;
460 }
461 
462 /**
463  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
464  * @pool: Pool to obtain descriptor from.
465  */
466 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
467 {
468 	struct srp_fr_desc *d = NULL;
469 	unsigned long flags;
470 
471 	spin_lock_irqsave(&pool->lock, flags);
472 	if (!list_empty(&pool->free_list)) {
473 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
474 		list_del(&d->entry);
475 	}
476 	spin_unlock_irqrestore(&pool->lock, flags);
477 
478 	return d;
479 }
480 
481 /**
482  * srp_fr_pool_put() - put an FR descriptor back in the free list
483  * @pool: Pool the descriptor was allocated from.
484  * @desc: Pointer to an array of fast registration descriptor pointers.
485  * @n:    Number of descriptors to put back.
486  *
487  * Note: The caller must already have queued an invalidation request for
488  * desc->mr->rkey before calling this function.
489  */
490 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
491 			    int n)
492 {
493 	unsigned long flags;
494 	int i;
495 
496 	spin_lock_irqsave(&pool->lock, flags);
497 	for (i = 0; i < n; i++)
498 		list_add(&desc[i]->entry, &pool->free_list);
499 	spin_unlock_irqrestore(&pool->lock, flags);
500 }
501 
502 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
503 {
504 	struct srp_device *dev = target->srp_host->srp_dev;
505 
506 	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
507 				  dev->max_pages_per_mr);
508 }
509 
510 /**
511  * srp_destroy_qp() - destroy an RDMA queue pair
512  * @ch: SRP RDMA channel.
513  *
 * Drain the qp before destroying it. This prevents the receive completion
 * handler from accessing the queue pair while it is being destroyed.
517  */
518 static void srp_destroy_qp(struct srp_rdma_ch *ch)
519 {
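	/*
	 * The send CQ is polled directly (IB_POLL_DIRECT), so reap any
	 * outstanding send completions under ch->lock before draining.
	 */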
520 	spin_lock_irq(&ch->lock);
521 	ib_process_cq_direct(ch->send_cq, -1);
522 	spin_unlock_irq(&ch->lock);
523 
524 	ib_drain_qp(ch->qp);
525 	ib_destroy_qp(ch->qp);
526 }
527 
528 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
529 {
530 	struct srp_target_port *target = ch->target;
531 	struct srp_device *dev = target->srp_host->srp_dev;
532 	const struct ib_device_attr *attr = &dev->dev->attrs;
533 	struct ib_qp_init_attr *init_attr;
534 	struct ib_cq *recv_cq, *send_cq;
535 	struct ib_qp *qp;
536 	struct srp_fr_pool *fr_pool = NULL;
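	/*
	 * Per SRP command, at most one send WR is needed for the SRP_CMD IU
	 * plus, when fast registration is used, one IB_WR_REG_MR and one
	 * IB_WR_LOCAL_INV work request per memory region.
	 */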
537 	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
538 	int ret;
539 
540 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
541 	if (!init_attr)
542 		return -ENOMEM;
543 
544 	/* queue_size + 1 for ib_drain_rq() */
545 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
546 				ch->comp_vector, IB_POLL_SOFTIRQ);
547 	if (IS_ERR(recv_cq)) {
548 		ret = PTR_ERR(recv_cq);
549 		goto err;
550 	}
551 
552 	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
553 				ch->comp_vector, IB_POLL_DIRECT);
554 	if (IS_ERR(send_cq)) {
555 		ret = PTR_ERR(send_cq);
556 		goto err_recv_cq;
557 	}
558 
559 	init_attr->event_handler       = srp_qp_event;
560 	init_attr->cap.max_send_wr     = m * target->queue_size;
561 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
562 	init_attr->cap.max_recv_sge    = 1;
563 	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
564 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
565 	init_attr->qp_type             = IB_QPT_RC;
566 	init_attr->send_cq             = send_cq;
567 	init_attr->recv_cq             = recv_cq;
568 
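	/*
	 * sge[0] carries the SRP IU itself; the remaining send SGEs are
	 * available for immediate data (see srp_map_data()).
	 */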
569 	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
570 
571 	if (target->using_rdma_cm) {
572 		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
573 		qp = ch->rdma_cm.cm_id->qp;
574 	} else {
575 		qp = ib_create_qp(dev->pd, init_attr);
576 		if (!IS_ERR(qp)) {
577 			ret = srp_init_ib_qp(target, qp);
578 			if (ret)
579 				ib_destroy_qp(qp);
580 		} else {
581 			ret = PTR_ERR(qp);
582 		}
583 	}
584 	if (ret) {
585 		pr_err("QP creation failed for dev %s: %d\n",
586 		       dev_name(&dev->dev->dev), ret);
587 		goto err_send_cq;
588 	}
589 
590 	if (dev->use_fast_reg) {
591 		fr_pool = srp_alloc_fr_pool(target);
592 		if (IS_ERR(fr_pool)) {
593 			ret = PTR_ERR(fr_pool);
594 			shost_printk(KERN_WARNING, target->scsi_host, PFX
595 				     "FR pool allocation failed (%d)\n", ret);
596 			goto err_qp;
597 		}
598 	}
599 
600 	if (ch->qp)
601 		srp_destroy_qp(ch);
602 	if (ch->recv_cq)
603 		ib_free_cq(ch->recv_cq);
604 	if (ch->send_cq)
605 		ib_free_cq(ch->send_cq);
606 
607 	ch->qp = qp;
608 	ch->recv_cq = recv_cq;
609 	ch->send_cq = send_cq;
610 
611 	if (dev->use_fast_reg) {
612 		if (ch->fr_pool)
613 			srp_destroy_fr_pool(ch->fr_pool);
614 		ch->fr_pool = fr_pool;
615 	}
616 
617 	kfree(init_attr);
618 	return 0;
619 
620 err_qp:
621 	if (target->using_rdma_cm)
622 		rdma_destroy_qp(ch->rdma_cm.cm_id);
623 	else
624 		ib_destroy_qp(qp);
625 
626 err_send_cq:
627 	ib_free_cq(send_cq);
628 
629 err_recv_cq:
630 	ib_free_cq(recv_cq);
631 
632 err:
633 	kfree(init_attr);
634 	return ret;
635 }
636 
637 /*
638  * Note: this function may be called without srp_alloc_iu_bufs() having been
639  * invoked. Hence the ch->[rt]x_ring checks.
640  */
641 static void srp_free_ch_ib(struct srp_target_port *target,
642 			   struct srp_rdma_ch *ch)
643 {
644 	struct srp_device *dev = target->srp_host->srp_dev;
645 	int i;
646 
647 	if (!ch->target)
648 		return;
649 
650 	if (target->using_rdma_cm) {
651 		if (ch->rdma_cm.cm_id) {
652 			rdma_destroy_id(ch->rdma_cm.cm_id);
653 			ch->rdma_cm.cm_id = NULL;
654 		}
655 	} else {
656 		if (ch->ib_cm.cm_id) {
657 			ib_destroy_cm_id(ch->ib_cm.cm_id);
658 			ch->ib_cm.cm_id = NULL;
659 		}
660 	}
661 
	/* Return if srp_new_cm_id() succeeded but srp_create_ch_ib() did not. */
663 	if (!ch->qp)
664 		return;
665 
666 	if (dev->use_fast_reg) {
667 		if (ch->fr_pool)
668 			srp_destroy_fr_pool(ch->fr_pool);
669 	}
670 
671 	srp_destroy_qp(ch);
672 	ib_free_cq(ch->send_cq);
673 	ib_free_cq(ch->recv_cq);
674 
675 	/*
	 * Prevent the SCSI error handler from using this channel after it has
	 * been freed: the error handler may keep trying to perform recovery
	 * actions after scsi_remove_host() has returned.
680 	 */
681 	ch->target = NULL;
682 
683 	ch->qp = NULL;
684 	ch->send_cq = ch->recv_cq = NULL;
685 
686 	if (ch->rx_ring) {
687 		for (i = 0; i < target->queue_size; ++i)
688 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
689 		kfree(ch->rx_ring);
690 		ch->rx_ring = NULL;
691 	}
692 	if (ch->tx_ring) {
693 		for (i = 0; i < target->queue_size; ++i)
694 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
695 		kfree(ch->tx_ring);
696 		ch->tx_ring = NULL;
697 	}
698 }
699 
700 static void srp_path_rec_completion(int status,
701 				    struct sa_path_rec *pathrec,
702 				    void *ch_ptr)
703 {
704 	struct srp_rdma_ch *ch = ch_ptr;
705 	struct srp_target_port *target = ch->target;
706 
707 	ch->status = status;
708 	if (status)
709 		shost_printk(KERN_ERR, target->scsi_host,
710 			     PFX "Got failed path rec status %d\n", status);
711 	else
712 		ch->ib_cm.path = *pathrec;
713 	complete(&ch->done);
714 }
715 
716 static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
717 {
718 	struct srp_target_port *target = ch->target;
719 	int ret;
720 
721 	ch->ib_cm.path.numb_path = 1;
722 
723 	init_completion(&ch->done);
724 
725 	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
726 					       target->srp_host->srp_dev->dev,
727 					       target->srp_host->port,
728 					       &ch->ib_cm.path,
729 					       IB_SA_PATH_REC_SERVICE_ID |
730 					       IB_SA_PATH_REC_DGID	 |
731 					       IB_SA_PATH_REC_SGID	 |
732 					       IB_SA_PATH_REC_NUMB_PATH	 |
733 					       IB_SA_PATH_REC_PKEY,
734 					       SRP_PATH_REC_TIMEOUT_MS,
735 					       GFP_KERNEL,
736 					       srp_path_rec_completion,
737 					       ch, &ch->ib_cm.path_query);
738 	if (ch->ib_cm.path_query_id < 0)
739 		return ch->ib_cm.path_query_id;
740 
741 	ret = wait_for_completion_interruptible(&ch->done);
742 	if (ret < 0)
743 		return ret;
744 
745 	if (ch->status < 0)
746 		shost_printk(KERN_WARNING, target->scsi_host,
747 			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
748 			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
749 			     be16_to_cpu(target->ib_cm.pkey),
750 			     be64_to_cpu(target->ib_cm.service_id));
751 
752 	return ch->status;
753 }
754 
755 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
756 {
757 	struct srp_target_port *target = ch->target;
758 	int ret;
759 
760 	init_completion(&ch->done);
761 
762 	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
763 	if (ret)
764 		return ret;
765 
766 	wait_for_completion_interruptible(&ch->done);
767 
768 	if (ch->status != 0)
769 		shost_printk(KERN_WARNING, target->scsi_host,
770 			     PFX "Path resolution failed\n");
771 
772 	return ch->status;
773 }
774 
775 static int srp_lookup_path(struct srp_rdma_ch *ch)
776 {
777 	struct srp_target_port *target = ch->target;
778 
779 	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
780 		srp_ib_lookup_path(ch);
781 }
782 
783 static u8 srp_get_subnet_timeout(struct srp_host *host)
784 {
785 	struct ib_port_attr attr;
786 	int ret;
787 	u8 subnet_timeout = 18;
788 
789 	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
790 	if (ret == 0)
791 		subnet_timeout = attr.subnet_timeout;
792 
793 	if (unlikely(subnet_timeout < 15))
794 		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
795 			dev_name(&host->srp_dev->dev->dev), subnet_timeout);
796 
797 	return subnet_timeout;
798 }
799 
800 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
801 			bool multich)
802 {
803 	struct srp_target_port *target = ch->target;
804 	struct {
805 		struct rdma_conn_param	  rdma_param;
806 		struct srp_login_req_rdma rdma_req;
807 		struct ib_cm_req_param	  ib_param;
808 		struct srp_login_req	  ib_req;
809 	} *req = NULL;
810 	char *ipi, *tpi;
811 	int status;
812 
813 	req = kzalloc(sizeof *req, GFP_KERNEL);
814 	if (!req)
815 		return -ENOMEM;
816 
817 	req->ib_param.flow_control = 1;
818 	req->ib_param.retry_count = target->tl_retry_count;
819 
820 	/*
821 	 * Pick some arbitrary defaults here; we could make these
822 	 * module parameters if anyone cared about setting them.
823 	 */
824 	req->ib_param.responder_resources = 4;
825 	req->ib_param.rnr_retry_count = 7;
826 	req->ib_param.max_cm_retries = 15;
827 
828 	req->ib_req.opcode = SRP_LOGIN_REQ;
829 	req->ib_req.tag = 0;
830 	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
831 	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
832 					      SRP_BUF_FORMAT_INDIRECT);
833 	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
834 				 SRP_MULTICHAN_SINGLE);
835 	if (srp_use_imm_data) {
836 		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
837 		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
838 	}
839 
840 	if (target->using_rdma_cm) {
841 		req->rdma_param.flow_control = req->ib_param.flow_control;
842 		req->rdma_param.responder_resources =
843 			req->ib_param.responder_resources;
844 		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
845 		req->rdma_param.retry_count = req->ib_param.retry_count;
846 		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
847 		req->rdma_param.private_data = &req->rdma_req;
848 		req->rdma_param.private_data_len = sizeof(req->rdma_req);
849 
850 		req->rdma_req.opcode = req->ib_req.opcode;
851 		req->rdma_req.tag = req->ib_req.tag;
852 		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
853 		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
854 		req->rdma_req.req_flags	= req->ib_req.req_flags;
855 		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
856 
857 		ipi = req->rdma_req.initiator_port_id;
858 		tpi = req->rdma_req.target_port_id;
859 	} else {
860 		u8 subnet_timeout;
861 
862 		subnet_timeout = srp_get_subnet_timeout(target->srp_host);
863 
864 		req->ib_param.primary_path = &ch->ib_cm.path;
865 		req->ib_param.alternate_path = NULL;
866 		req->ib_param.service_id = target->ib_cm.service_id;
867 		get_random_bytes(&req->ib_param.starting_psn, 4);
868 		req->ib_param.starting_psn &= 0xffffff;
869 		req->ib_param.qp_num = ch->qp->qp_num;
870 		req->ib_param.qp_type = ch->qp->qp_type;
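		/*
		 * subnet_timeout and the CM response timeouts are both
		 * exponents (timeout = 4.096 us * 2^n), so "+ 2" gives the
		 * target roughly four times the subnet timeout to respond.
		 */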
871 		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
872 		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
873 		req->ib_param.private_data = &req->ib_req;
874 		req->ib_param.private_data_len = sizeof(req->ib_req);
875 
876 		ipi = req->ib_req.initiator_port_id;
877 		tpi = req->ib_req.target_port_id;
878 	}
879 
880 	/*
881 	 * In the published SRP specification (draft rev. 16a), the
882 	 * port identifier format is 8 bytes of ID extension followed
883 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
884 	 * opposite order, so that the GUID comes first.
885 	 *
886 	 * Targets conforming to these obsolete drafts can be
887 	 * recognized by the I/O Class they report.
888 	 */
889 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
890 		memcpy(ipi,     &target->sgid.global.interface_id, 8);
891 		memcpy(ipi + 8, &target->initiator_ext, 8);
892 		memcpy(tpi,     &target->ioc_guid, 8);
893 		memcpy(tpi + 8, &target->id_ext, 8);
894 	} else {
895 		memcpy(ipi,     &target->initiator_ext, 8);
896 		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
897 		memcpy(tpi,     &target->id_ext, 8);
898 		memcpy(tpi + 8, &target->ioc_guid, 8);
899 	}
900 
901 	/*
902 	 * Topspin/Cisco SRP targets will reject our login unless we
903 	 * zero out the first 8 bytes of our initiator port ID and set
904 	 * the second 8 bytes to the local node GUID.
905 	 */
906 	if (srp_target_is_topspin(target)) {
907 		shost_printk(KERN_DEBUG, target->scsi_host,
908 			     PFX "Topspin/Cisco initiator port ID workaround "
909 			     "activated for target GUID %016llx\n",
910 			     be64_to_cpu(target->ioc_guid));
911 		memset(ipi, 0, 8);
912 		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
913 	}
914 
915 	if (target->using_rdma_cm)
916 		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
917 	else
918 		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
919 
920 	kfree(req);
921 
922 	return status;
923 }
924 
925 static bool srp_queue_remove_work(struct srp_target_port *target)
926 {
927 	bool changed = false;
928 
929 	spin_lock_irq(&target->lock);
930 	if (target->state != SRP_TARGET_REMOVED) {
931 		target->state = SRP_TARGET_REMOVED;
932 		changed = true;
933 	}
934 	spin_unlock_irq(&target->lock);
935 
936 	if (changed)
937 		queue_work(srp_remove_wq, &target->remove_work);
938 
939 	return changed;
940 }
941 
942 static void srp_disconnect_target(struct srp_target_port *target)
943 {
944 	struct srp_rdma_ch *ch;
945 	int i, ret;
946 
947 	/* XXX should send SRP_I_LOGOUT request */
948 
949 	for (i = 0; i < target->ch_count; i++) {
950 		ch = &target->ch[i];
951 		ch->connected = false;
952 		ret = 0;
953 		if (target->using_rdma_cm) {
954 			if (ch->rdma_cm.cm_id)
955 				rdma_disconnect(ch->rdma_cm.cm_id);
956 		} else {
957 			if (ch->ib_cm.cm_id)
958 				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
959 						      NULL, 0);
960 		}
961 		if (ret < 0) {
962 			shost_printk(KERN_DEBUG, target->scsi_host,
963 				     PFX "Sending CM DREQ failed\n");
964 		}
965 	}
966 }
967 
968 static void srp_free_req_data(struct srp_target_port *target,
969 			      struct srp_rdma_ch *ch)
970 {
971 	struct srp_device *dev = target->srp_host->srp_dev;
972 	struct ib_device *ibdev = dev->dev;
973 	struct srp_request *req;
974 	int i;
975 
976 	if (!ch->req_ring)
977 		return;
978 
979 	for (i = 0; i < target->req_ring_size; ++i) {
980 		req = &ch->req_ring[i];
981 		if (dev->use_fast_reg)
982 			kfree(req->fr_list);
983 		if (req->indirect_dma_addr) {
984 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
985 					    target->indirect_size,
986 					    DMA_TO_DEVICE);
987 		}
988 		kfree(req->indirect_desc);
989 	}
990 
991 	kfree(ch->req_ring);
992 	ch->req_ring = NULL;
993 }
994 
995 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
996 {
997 	struct srp_target_port *target = ch->target;
998 	struct srp_device *srp_dev = target->srp_host->srp_dev;
999 	struct ib_device *ibdev = srp_dev->dev;
1000 	struct srp_request *req;
1001 	void *mr_list;
1002 	dma_addr_t dma_addr;
1003 	int i, ret = -ENOMEM;
1004 
1005 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
1006 			       GFP_KERNEL);
1007 	if (!ch->req_ring)
1008 		goto out;
1009 
1010 	for (i = 0; i < target->req_ring_size; ++i) {
1011 		req = &ch->req_ring[i];
1012 		mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
1013 					GFP_KERNEL);
1014 		if (!mr_list)
1015 			goto out;
1016 		if (srp_dev->use_fast_reg)
1017 			req->fr_list = mr_list;
1018 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
1019 		if (!req->indirect_desc)
1020 			goto out;
1021 
1022 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1023 					     target->indirect_size,
1024 					     DMA_TO_DEVICE);
1025 		if (ib_dma_mapping_error(ibdev, dma_addr))
1026 			goto out;
1027 
1028 		req->indirect_dma_addr = dma_addr;
1029 	}
1030 	ret = 0;
1031 
1032 out:
1033 	return ret;
1034 }
1035 
1036 /**
1037  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1038  * @shost: SCSI host whose attributes to remove from sysfs.
1039  *
 * Note: Any attributes defined in the host template that did not exist before
 * this function was invoked are ignored.
1042  */
1043 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1044 {
1045 	struct device_attribute **attr;
1046 
1047 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
1048 		device_remove_file(&shost->shost_dev, *attr);
1049 }
1050 
1051 static void srp_remove_target(struct srp_target_port *target)
1052 {
1053 	struct srp_rdma_ch *ch;
1054 	int i;
1055 
1056 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1057 
1058 	srp_del_scsi_host_attr(target->scsi_host);
1059 	srp_rport_get(target->rport);
1060 	srp_remove_host(target->scsi_host);
1061 	scsi_remove_host(target->scsi_host);
1062 	srp_stop_rport_timers(target->rport);
1063 	srp_disconnect_target(target);
1064 	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1065 	for (i = 0; i < target->ch_count; i++) {
1066 		ch = &target->ch[i];
1067 		srp_free_ch_ib(target, ch);
1068 	}
1069 	cancel_work_sync(&target->tl_err_work);
1070 	srp_rport_put(target->rport);
1071 	for (i = 0; i < target->ch_count; i++) {
1072 		ch = &target->ch[i];
1073 		srp_free_req_data(target, ch);
1074 	}
1075 	kfree(target->ch);
1076 	target->ch = NULL;
1077 
1078 	spin_lock(&target->srp_host->target_lock);
1079 	list_del(&target->list);
1080 	spin_unlock(&target->srp_host->target_lock);
1081 
1082 	scsi_host_put(target->scsi_host);
1083 }
1084 
1085 static void srp_remove_work(struct work_struct *work)
1086 {
1087 	struct srp_target_port *target =
1088 		container_of(work, struct srp_target_port, remove_work);
1089 
1090 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1091 
1092 	srp_remove_target(target);
1093 }
1094 
1095 static void srp_rport_delete(struct srp_rport *rport)
1096 {
1097 	struct srp_target_port *target = rport->lld_data;
1098 
1099 	srp_queue_remove_work(target);
1100 }
1101 
1102 /**
1103  * srp_connected_ch() - number of connected channels
1104  * @target: SRP target port.
1105  */
1106 static int srp_connected_ch(struct srp_target_port *target)
1107 {
1108 	int i, c = 0;
1109 
1110 	for (i = 0; i < target->ch_count; i++)
1111 		c += target->ch[i].connected;
1112 
1113 	return c;
1114 }
1115 
1116 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1117 			  bool multich)
1118 {
1119 	struct srp_target_port *target = ch->target;
1120 	int ret;
1121 
1122 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1123 
1124 	ret = srp_lookup_path(ch);
1125 	if (ret)
1126 		goto out;
1127 
1128 	while (1) {
1129 		init_completion(&ch->done);
1130 		ret = srp_send_req(ch, max_iu_len, multich);
1131 		if (ret)
1132 			goto out;
1133 		ret = wait_for_completion_interruptible(&ch->done);
1134 		if (ret < 0)
1135 			goto out;
1136 
1137 		/*
1138 		 * The CM event handling code will set status to
1139 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1140 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1141 		 * redirect REJ back.
1142 		 */
1143 		ret = ch->status;
1144 		switch (ret) {
1145 		case 0:
1146 			ch->connected = true;
1147 			goto out;
1148 
1149 		case SRP_PORT_REDIRECT:
1150 			ret = srp_lookup_path(ch);
1151 			if (ret)
1152 				goto out;
1153 			break;
1154 
1155 		case SRP_DLID_REDIRECT:
1156 			break;
1157 
1158 		case SRP_STALE_CONN:
1159 			shost_printk(KERN_ERR, target->scsi_host, PFX
1160 				     "giving up on stale connection\n");
1161 			ret = -ECONNRESET;
1162 			goto out;
1163 
1164 		default:
1165 			goto out;
1166 		}
1167 	}
1168 
1169 out:
1170 	return ret <= 0 ? ret : -ENODEV;
1171 }
1172 
1173 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1174 {
1175 	srp_handle_qp_err(cq, wc, "INV RKEY");
1176 }
1177 
1178 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1179 		u32 rkey)
1180 {
1181 	struct ib_send_wr wr = {
1182 		.opcode		    = IB_WR_LOCAL_INV,
1183 		.next		    = NULL,
1184 		.num_sge	    = 0,
1185 		.send_flags	    = 0,
1186 		.ex.invalidate_rkey = rkey,
1187 	};
1188 
1189 	wr.wr_cqe = &req->reg_cqe;
1190 	req->reg_cqe.done = srp_inv_rkey_err_done;
1191 	return ib_post_send(ch->qp, &wr, NULL);
1192 }
1193 
1194 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1195 			   struct srp_rdma_ch *ch,
1196 			   struct srp_request *req)
1197 {
1198 	struct srp_target_port *target = ch->target;
1199 	struct srp_device *dev = target->srp_host->srp_dev;
1200 	struct ib_device *ibdev = dev->dev;
1201 	int i, res;
1202 
1203 	if (!scsi_sglist(scmnd) ||
1204 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1205 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1206 		return;
1207 
1208 	if (dev->use_fast_reg) {
1209 		struct srp_fr_desc **pfr;
1210 
1211 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1212 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1213 			if (res < 0) {
1214 				shost_printk(KERN_ERR, target->scsi_host, PFX
1215 				  "Queueing INV WR for rkey %#x failed (%d)\n",
1216 				  (*pfr)->mr->rkey, res);
1217 				queue_work(system_long_wq,
1218 					   &target->tl_err_work);
1219 			}
1220 		}
1221 		if (req->nmdesc)
1222 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
1223 					req->nmdesc);
1224 	}
1225 
1226 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1227 			scmnd->sc_data_direction);
1228 }
1229 
1230 /**
 * srp_claim_req() - Take ownership of the scmnd associated with a request.
1232  * @ch: SRP RDMA channel.
1233  * @req: SRP request.
1234  * @sdev: If not NULL, only take ownership for this SCSI device.
1235  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1236  *         ownership of @req->scmnd if it equals @scmnd.
1237  *
1238  * Return value:
1239  * Either NULL or a pointer to the SCSI command the caller became owner of.
1240  */
1241 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1242 				       struct srp_request *req,
1243 				       struct scsi_device *sdev,
1244 				       struct scsi_cmnd *scmnd)
1245 {
1246 	unsigned long flags;
1247 
1248 	spin_lock_irqsave(&ch->lock, flags);
1249 	if (req->scmnd &&
1250 	    (!sdev || req->scmnd->device == sdev) &&
1251 	    (!scmnd || req->scmnd == scmnd)) {
1252 		scmnd = req->scmnd;
1253 		req->scmnd = NULL;
1254 	} else {
1255 		scmnd = NULL;
1256 	}
1257 	spin_unlock_irqrestore(&ch->lock, flags);
1258 
1259 	return scmnd;
1260 }
1261 
1262 /**
1263  * srp_free_req() - Unmap data and adjust ch->req_lim.
1264  * @ch:     SRP RDMA channel.
1265  * @req:    Request to be freed.
1266  * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
1268  */
1269 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1270 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1271 {
1272 	unsigned long flags;
1273 
1274 	srp_unmap_data(scmnd, ch, req);
1275 
1276 	spin_lock_irqsave(&ch->lock, flags);
1277 	ch->req_lim += req_lim_delta;
1278 	spin_unlock_irqrestore(&ch->lock, flags);
1279 }
1280 
1281 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1282 			   struct scsi_device *sdev, int result)
1283 {
1284 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1285 
1286 	if (scmnd) {
1287 		srp_free_req(ch, req, scmnd, 0);
1288 		scmnd->result = result;
1289 		scmnd->scsi_done(scmnd);
1290 	}
1291 }
1292 
1293 static void srp_terminate_io(struct srp_rport *rport)
1294 {
1295 	struct srp_target_port *target = rport->lld_data;
1296 	struct srp_rdma_ch *ch;
1297 	int i, j;
1298 
1299 	for (i = 0; i < target->ch_count; i++) {
1300 		ch = &target->ch[i];
1301 
1302 		for (j = 0; j < target->req_ring_size; ++j) {
1303 			struct srp_request *req = &ch->req_ring[j];
1304 
1305 			srp_finish_req(ch, req, NULL,
1306 				       DID_TRANSPORT_FAILFAST << 16);
1307 		}
1308 	}
1309 }
1310 
1311 /* Calculate maximum initiator to target information unit length. */
1312 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1313 				  uint32_t max_it_iu_size)
1314 {
1315 	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1316 		sizeof(struct srp_indirect_buf) +
1317 		cmd_sg_cnt * sizeof(struct srp_direct_buf);
1318 
1319 	if (use_imm_data)
1320 		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1321 				 srp_max_imm_data);
1322 
1323 	if (max_it_iu_size)
1324 		max_iu_len = min(max_iu_len, max_it_iu_size);
1325 
1326 	pr_debug("max_iu_len = %d\n", max_iu_len);
1327 
1328 	return max_iu_len;
1329 }
1330 
1331 /*
1332  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1333  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1334  * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to guarantee that is to call srp_reconnect_rport()
 * instead of calling this function directly, since srp_reconnect_rport()
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
1339  */
1340 static int srp_rport_reconnect(struct srp_rport *rport)
1341 {
1342 	struct srp_target_port *target = rport->lld_data;
1343 	struct srp_rdma_ch *ch;
1344 	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1345 						srp_use_imm_data,
1346 						target->max_it_iu_size);
1347 	int i, j, ret = 0;
1348 	bool multich = false;
1349 
1350 	srp_disconnect_target(target);
1351 
1352 	if (target->state == SRP_TARGET_SCANNING)
1353 		return -ENODEV;
1354 
1355 	/*
1356 	 * Now get a new local CM ID so that we avoid confusing the target in
1357 	 * case things are really fouled up. Doing so also ensures that all CM
1358 	 * callbacks will have finished before a new QP is allocated.
1359 	 */
1360 	for (i = 0; i < target->ch_count; i++) {
1361 		ch = &target->ch[i];
1362 		ret += srp_new_cm_id(ch);
1363 	}
1364 	for (i = 0; i < target->ch_count; i++) {
1365 		ch = &target->ch[i];
1366 		for (j = 0; j < target->req_ring_size; ++j) {
1367 			struct srp_request *req = &ch->req_ring[j];
1368 
1369 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1370 		}
1371 	}
1372 	for (i = 0; i < target->ch_count; i++) {
1373 		ch = &target->ch[i];
1374 		/*
1375 		 * Whether or not creating a new CM ID succeeded, create a new
1376 		 * QP. This guarantees that all completion callback function
1377 		 * invocations have finished before request resetting starts.
1378 		 */
1379 		ret += srp_create_ch_ib(ch);
1380 
1381 		INIT_LIST_HEAD(&ch->free_tx);
1382 		for (j = 0; j < target->queue_size; ++j)
1383 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1384 	}
1385 
1386 	target->qp_in_error = false;
1387 
1388 	for (i = 0; i < target->ch_count; i++) {
1389 		ch = &target->ch[i];
1390 		if (ret)
1391 			break;
1392 		ret = srp_connect_ch(ch, max_iu_len, multich);
1393 		multich = true;
1394 	}
1395 
1396 	if (ret == 0)
1397 		shost_printk(KERN_INFO, target->scsi_host,
1398 			     PFX "reconnect succeeded\n");
1399 
1400 	return ret;
1401 }
1402 
1403 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1404 			 unsigned int dma_len, u32 rkey)
1405 {
1406 	struct srp_direct_buf *desc = state->desc;
1407 
1408 	WARN_ON_ONCE(!dma_len);
1409 
1410 	desc->va = cpu_to_be64(dma_addr);
1411 	desc->key = cpu_to_be32(rkey);
1412 	desc->len = cpu_to_be32(dma_len);
1413 
1414 	state->total_len += dma_len;
1415 	state->desc++;
1416 	state->ndesc++;
1417 }
1418 
1419 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1420 {
1421 	srp_handle_qp_err(cq, wc, "FAST REG");
1422 }
1423 
1424 /*
 * Map up to sg_nents elements of state->sg, where *sg_offset_p is the offset
 * at which to start in the first element. If sg_offset_p != NULL then
1427  * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1428  * byte that has not yet been mapped.
1429  */
1430 static int srp_map_finish_fr(struct srp_map_state *state,
1431 			     struct srp_request *req,
1432 			     struct srp_rdma_ch *ch, int sg_nents,
1433 			     unsigned int *sg_offset_p)
1434 {
1435 	struct srp_target_port *target = ch->target;
1436 	struct srp_device *dev = target->srp_host->srp_dev;
1437 	struct ib_reg_wr wr;
1438 	struct srp_fr_desc *desc;
1439 	u32 rkey;
1440 	int n, err;
1441 
1442 	if (state->fr.next >= state->fr.end) {
1443 		shost_printk(KERN_ERR, ch->target->scsi_host,
1444 			     PFX "Out of MRs (mr_per_cmd = %d)\n",
1445 			     ch->target->mr_per_cmd);
1446 		return -ENOMEM;
1447 	}
1448 
1449 	WARN_ON_ONCE(!dev->use_fast_reg);
1450 
1451 	if (sg_nents == 1 && target->global_rkey) {
1452 		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1453 
1454 		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1455 			     sg_dma_len(state->sg) - sg_offset,
1456 			     target->global_rkey);
1457 		if (sg_offset_p)
1458 			*sg_offset_p = 0;
1459 		return 1;
1460 	}
1461 
1462 	desc = srp_fr_pool_get(ch->fr_pool);
1463 	if (!desc)
1464 		return -ENOMEM;
1465 
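	/*
	 * Bump the key portion of the rkey so that stale requests that still
	 * reference the previous registration of this MR are rejected.
	 */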
1466 	rkey = ib_inc_rkey(desc->mr->rkey);
1467 	ib_update_fast_reg_key(desc->mr, rkey);
1468 
1469 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1470 			 dev->mr_page_size);
1471 	if (unlikely(n < 0)) {
1472 		srp_fr_pool_put(ch->fr_pool, &desc, 1);
1473 		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1474 			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1475 			 sg_offset_p ? *sg_offset_p : -1, n);
1476 		return n;
1477 	}
1478 
1479 	WARN_ON_ONCE(desc->mr->length == 0);
1480 
1481 	req->reg_cqe.done = srp_reg_mr_err_done;
1482 
1483 	wr.wr.next = NULL;
1484 	wr.wr.opcode = IB_WR_REG_MR;
1485 	wr.wr.wr_cqe = &req->reg_cqe;
1486 	wr.wr.num_sge = 0;
1487 	wr.wr.send_flags = 0;
1488 	wr.mr = desc->mr;
1489 	wr.key = desc->mr->rkey;
1490 	wr.access = (IB_ACCESS_LOCAL_WRITE |
1491 		     IB_ACCESS_REMOTE_READ |
1492 		     IB_ACCESS_REMOTE_WRITE);
1493 
1494 	*state->fr.next++ = desc;
1495 	state->nmdesc++;
1496 
1497 	srp_map_desc(state, desc->mr->iova,
1498 		     desc->mr->length, desc->mr->rkey);
1499 
1500 	err = ib_post_send(ch->qp, &wr.wr, NULL);
1501 	if (unlikely(err)) {
1502 		WARN_ON_ONCE(err == -ENOMEM);
1503 		return err;
1504 	}
1505 
1506 	return n;
1507 }
1508 
1509 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1510 			 struct srp_request *req, struct scatterlist *scat,
1511 			 int count)
1512 {
1513 	unsigned int sg_offset = 0;
1514 
1515 	state->fr.next = req->fr_list;
1516 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1517 	state->sg = scat;
1518 
1519 	if (count == 0)
1520 		return 0;
1521 
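	/*
	 * Register the S/G list in chunks: each srp_map_finish_fr() call
	 * consumes one FR descriptor, maps up to 'count' entries and returns
	 * the number of entries it actually covered.
	 */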
1522 	while (count) {
1523 		int i, n;
1524 
1525 		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1526 		if (unlikely(n < 0))
1527 			return n;
1528 
1529 		count -= n;
1530 		for (i = 0; i < n; i++)
1531 			state->sg = sg_next(state->sg);
1532 	}
1533 
1534 	return 0;
1535 }
1536 
1537 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1538 			  struct srp_request *req, struct scatterlist *scat,
1539 			  int count)
1540 {
1541 	struct srp_target_port *target = ch->target;
1542 	struct scatterlist *sg;
1543 	int i;
1544 
1545 	for_each_sg(scat, sg, count, i) {
1546 		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1547 			     target->global_rkey);
1548 	}
1549 
1550 	return 0;
1551 }
1552 
1553 /*
1554  * Register the indirect data buffer descriptor with the HCA.
1555  *
1556  * Note: since the indirect data buffer descriptor has been allocated with
1557  * kmalloc() it is guaranteed that this buffer is a physically contiguous
1558  * memory buffer.
1559  */
1560 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1561 		       void **next_mr, void **end_mr, u32 idb_len,
1562 		       __be32 *idb_rkey)
1563 {
1564 	struct srp_target_port *target = ch->target;
1565 	struct srp_device *dev = target->srp_host->srp_dev;
1566 	struct srp_map_state state;
1567 	struct srp_direct_buf idb_desc;
1568 	struct scatterlist idb_sg[1];
1569 	int ret;
1570 
1571 	memset(&state, 0, sizeof(state));
1572 	memset(&idb_desc, 0, sizeof(idb_desc));
1573 	state.gen.next = next_mr;
1574 	state.gen.end = end_mr;
1575 	state.desc = &idb_desc;
1576 	state.base_dma_addr = req->indirect_dma_addr;
1577 	state.dma_len = idb_len;
1578 
1579 	if (dev->use_fast_reg) {
1580 		state.sg = idb_sg;
1581 		sg_init_one(idb_sg, req->indirect_desc, idb_len);
1582 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1583 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1584 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1585 #endif
1586 		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1587 		if (ret < 0)
1588 			return ret;
1589 		WARN_ON_ONCE(ret < 1);
1590 	} else {
1591 		return -EINVAL;
1592 	}
1593 
1594 	*idb_rkey = idb_desc.key;
1595 
1596 	return 0;
1597 }
1598 
1599 static void srp_check_mapping(struct srp_map_state *state,
1600 			      struct srp_rdma_ch *ch, struct srp_request *req,
1601 			      struct scatterlist *scat, int count)
1602 {
1603 	struct srp_device *dev = ch->target->srp_host->srp_dev;
1604 	struct srp_fr_desc **pfr;
1605 	u64 desc_len = 0, mr_len = 0;
1606 	int i;
1607 
1608 	for (i = 0; i < state->ndesc; i++)
1609 		desc_len += be32_to_cpu(req->indirect_desc[i].len);
1610 	if (dev->use_fast_reg)
1611 		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1612 			mr_len += (*pfr)->mr->length;
1613 	if (desc_len != scsi_bufflen(req->scmnd) ||
1614 	    mr_len > scsi_bufflen(req->scmnd))
1615 		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1616 		       scsi_bufflen(req->scmnd), desc_len, mr_len,
1617 		       state->ndesc, state->nmdesc);
1618 }
1619 
1620 /**
1621  * srp_map_data() - map SCSI data buffer onto an SRP request
1622  * @scmnd: SCSI command to map
1623  * @ch: SRP RDMA channel
1624  * @req: SRP request
1625  *
1626  * Returns the length in bytes of the SRP_CMD IU or a negative value if
1627  * mapping failed. The size of any immediate data is not included in the
1628  * return value.
1629  */
1630 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1631 			struct srp_request *req)
1632 {
1633 	struct srp_target_port *target = ch->target;
1634 	struct scatterlist *scat, *sg;
1635 	struct srp_cmd *cmd = req->cmd->buf;
1636 	int i, len, nents, count, ret;
1637 	struct srp_device *dev;
1638 	struct ib_device *ibdev;
1639 	struct srp_map_state state;
1640 	struct srp_indirect_buf *indirect_hdr;
1641 	u64 data_len;
1642 	u32 idb_len, table_len;
1643 	__be32 idb_rkey;
1644 	u8 fmt;
1645 
1646 	req->cmd->num_sge = 1;
1647 
1648 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1649 		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1650 
1651 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1652 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
1653 		shost_printk(KERN_WARNING, target->scsi_host,
1654 			     PFX "Unhandled data direction %d\n",
1655 			     scmnd->sc_data_direction);
1656 		return -EINVAL;
1657 	}
1658 
1659 	nents = scsi_sg_count(scmnd);
1660 	scat  = scsi_sglist(scmnd);
1661 	data_len = scsi_bufflen(scmnd);
1662 
1663 	dev = target->srp_host->srp_dev;
1664 	ibdev = dev->dev;
1665 
1666 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1667 	if (unlikely(count == 0))
1668 		return -EIO;
1669 
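	/*
	 * For small writes, send the data as immediate data: the payload is
	 * attached as extra SGEs behind the SRP_CMD IU instead of being
	 * registered for RDMA.
	 */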
1670 	if (ch->use_imm_data &&
1671 	    count <= ch->max_imm_sge &&
1672 	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1673 	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
1674 		struct srp_imm_buf *buf;
1675 		struct ib_sge *sge = &req->cmd->sge[1];
1676 
1677 		fmt = SRP_DATA_DESC_IMM;
1678 		len = SRP_IMM_DATA_OFFSET;
1679 		req->nmdesc = 0;
1680 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1681 		buf->len = cpu_to_be32(data_len);
1682 		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1683 		for_each_sg(scat, sg, count, i) {
1684 			sge[i].addr   = sg_dma_address(sg);
1685 			sge[i].length = sg_dma_len(sg);
1686 			sge[i].lkey   = target->lkey;
1687 		}
1688 		req->cmd->num_sge += count;
1689 		goto map_complete;
1690 	}
1691 
1692 	fmt = SRP_DATA_DESC_DIRECT;
1693 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1694 		sizeof(struct srp_direct_buf);
1695 
1696 	if (count == 1 && target->global_rkey) {
1697 		/*
1698 		 * The midlayer only generated a single gather/scatter
1699 		 * entry, or DMA mapping coalesced everything to a
1700 		 * single entry.  So a direct descriptor along with
1701 		 * the DMA MR suffices.
1702 		 */
1703 		struct srp_direct_buf *buf;
1704 
1705 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1706 		buf->va  = cpu_to_be64(sg_dma_address(scat));
1707 		buf->key = cpu_to_be32(target->global_rkey);
1708 		buf->len = cpu_to_be32(sg_dma_len(scat));
1709 
1710 		req->nmdesc = 0;
1711 		goto map_complete;
1712 	}
1713 
1714 	/*
1715 	 * We have more than one scatter/gather entry, so build our indirect
1716 	 * descriptor table, trying to merge as many entries as we can.
1717 	 */
1718 	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1719 
1720 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1721 				   target->indirect_size, DMA_TO_DEVICE);
1722 
1723 	memset(&state, 0, sizeof(state));
1724 	state.desc = req->indirect_desc;
1725 	if (dev->use_fast_reg)
1726 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
1727 	else
1728 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
1729 	req->nmdesc = state.nmdesc;
1730 	if (ret < 0)
1731 		goto unmap;
1732 
1733 	{
1734 		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1735 			"Memory mapping consistency check");
1736 		if (DYNAMIC_DEBUG_BRANCH(ddm))
1737 			srp_check_mapping(&state, ch, req, scat, count);
1738 	}
1739 
1740 	/* We've mapped the request, now pull as much of the indirect
1741 	 * descriptor table as we can into the command buffer. If this
1742 	 * target is not using an external indirect table, we are
1743 	 * guaranteed to fit into the command, as the SCSI layer won't
1744 	 * give us more S/G entries than we allow.
1745 	 */
1746 	if (state.ndesc == 1) {
1747 		/*
1748 		 * Memory registration collapsed the sg-list into one entry,
1749 		 * so use a direct descriptor.
1750 		 */
1751 		struct srp_direct_buf *buf;
1752 
1753 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1754 		*buf = req->indirect_desc[0];
1755 		goto map_complete;
1756 	}
1757 
1758 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1759 						!target->allow_ext_sg)) {
1760 		shost_printk(KERN_ERR, target->scsi_host,
1761 			     "Could not fit S/G list into SRP_CMD\n");
1762 		ret = -EIO;
1763 		goto unmap;
1764 	}
1765 
1766 	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof(struct srp_direct_buf);
1768 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1769 
1770 	fmt = SRP_DATA_DESC_INDIRECT;
1771 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1772 		sizeof(struct srp_indirect_buf);
	len += count * sizeof(struct srp_direct_buf);
1774 
1775 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof(struct srp_direct_buf));
1777 
1778 	if (!target->global_rkey) {
1779 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1780 				  idb_len, &idb_rkey);
1781 		if (ret < 0)
1782 			goto unmap;
1783 		req->nmdesc++;
1784 	} else {
1785 		idb_rkey = cpu_to_be32(target->global_rkey);
1786 	}
1787 
1788 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1789 	indirect_hdr->table_desc.key = idb_rkey;
1790 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1791 	indirect_hdr->len = cpu_to_be32(state.total_len);
1792 
1793 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1794 		cmd->data_out_desc_cnt = count;
1795 	else
1796 		cmd->data_in_desc_cnt = count;
1797 
1798 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1799 				      DMA_TO_DEVICE);
1800 
1801 map_complete:
1802 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1803 		cmd->buf_fmt = fmt << 4;
1804 	else
1805 		cmd->buf_fmt = fmt;
1806 
1807 	return len;
1808 
1809 unmap:
1810 	srp_unmap_data(scmnd, ch, req);
1811 	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1812 		ret = -E2BIG;
1813 	return ret;
1814 }
1815 
1816 /*
1817  * Return an IU and possibly a credit to the free pool
1818  */
1819 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1820 			  enum srp_iu_type iu_type)
1821 {
1822 	unsigned long flags;
1823 
1824 	spin_lock_irqsave(&ch->lock, flags);
1825 	list_add(&iu->list, &ch->free_tx);
1826 	if (iu_type != SRP_IU_RSP)
1827 		++ch->req_lim;
1828 	spin_unlock_irqrestore(&ch->lock, flags);
1829 }
1830 
1831 /*
1832  * Must be called with ch->lock held to protect req_lim and free_tx.
1833  * If IU is not sent, it must be returned using srp_put_tx_iu().
1834  *
1835  * Note:
1836  * An upper limit for the number of allocated information units for each
1837  * request type is:
1838  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1839  *   more than Scsi_Host.can_queue requests.
1840  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1841  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1842  *   one unanswered SRP request to an initiator.
1843  */
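/*
 * Sketch of the reservation logic implemented below: because rsv equals
 * SRP_TSK_MGMT_SQ_SIZE for command IUs, a SRP_IU_CMD is only handed out
 * while ch->req_lim > SRP_TSK_MGMT_SQ_SIZE, which keeps at least that many
 * credits in reserve for task management IUs. SRP_IU_RSP skips the check
 * entirely since initiator responses do not consume credits.
 */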
1844 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1845 				      enum srp_iu_type iu_type)
1846 {
1847 	struct srp_target_port *target = ch->target;
1848 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1849 	struct srp_iu *iu;
1850 
1851 	lockdep_assert_held(&ch->lock);
1852 
1853 	ib_process_cq_direct(ch->send_cq, -1);
1854 
1855 	if (list_empty(&ch->free_tx))
1856 		return NULL;
1857 
1858 	/* Initiator responses to target requests do not consume credits */
1859 	if (iu_type != SRP_IU_RSP) {
1860 		if (ch->req_lim <= rsv) {
1861 			++target->zero_req_lim;
1862 			return NULL;
1863 		}
1864 
1865 		--ch->req_lim;
1866 	}
1867 
1868 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1869 	list_del(&iu->list);
1870 	return iu;
1871 }
1872 
1873 /*
1874  * Note: if this function is called from inside ib_drain_sq() then it will
1875  * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1876  * with status IB_WC_SUCCESS then that's a bug.
1877  */
1878 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1879 {
1880 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1881 	struct srp_rdma_ch *ch = cq->cq_context;
1882 
1883 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1884 		srp_handle_qp_err(cq, wc, "SEND");
1885 		return;
1886 	}
1887 
1888 	lockdep_assert_held(&ch->lock);
1889 
1890 	list_add(&iu->list, &ch->free_tx);
1891 }
1892 
1893 /**
1894  * srp_post_send() - send an SRP information unit
1895  * @ch: RDMA channel over which to send the information unit.
1896  * @iu: Information unit to send.
1897  * @len: Length of the information unit excluding immediate data.
1898  */
1899 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1900 {
1901 	struct srp_target_port *target = ch->target;
1902 	struct ib_send_wr wr;
1903 
1904 	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1905 		return -EINVAL;
1906 
1907 	iu->sge[0].addr   = iu->dma;
1908 	iu->sge[0].length = len;
1909 	iu->sge[0].lkey   = target->lkey;
1910 
1911 	iu->cqe.done = srp_send_done;
1912 
1913 	wr.next       = NULL;
1914 	wr.wr_cqe     = &iu->cqe;
1915 	wr.sg_list    = &iu->sge[0];
1916 	wr.num_sge    = iu->num_sge;
1917 	wr.opcode     = IB_WR_SEND;
1918 	wr.send_flags = IB_SEND_SIGNALED;
1919 
1920 	return ib_post_send(ch->qp, &wr, NULL);
1921 }
1922 
1923 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1924 {
1925 	struct srp_target_port *target = ch->target;
1926 	struct ib_recv_wr wr;
1927 	struct ib_sge list;
1928 
1929 	list.addr   = iu->dma;
1930 	list.length = iu->size;
1931 	list.lkey   = target->lkey;
1932 
1933 	iu->cqe.done = srp_recv_done;
1934 
1935 	wr.next     = NULL;
1936 	wr.wr_cqe   = &iu->cqe;
1937 	wr.sg_list  = &list;
1938 	wr.num_sge  = 1;
1939 
1940 	return ib_post_recv(ch->qp, &wr, NULL);
1941 }
1942 
1943 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1944 {
1945 	struct srp_target_port *target = ch->target;
1946 	struct srp_request *req;
1947 	struct scsi_cmnd *scmnd;
1948 	unsigned long flags;
1949 
1950 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1951 		spin_lock_irqsave(&ch->lock, flags);
1952 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1953 		if (rsp->tag == ch->tsk_mgmt_tag) {
1954 			ch->tsk_mgmt_status = -1;
1955 			if (be32_to_cpu(rsp->resp_data_len) >= 4)
1956 				ch->tsk_mgmt_status = rsp->data[3];
1957 			complete(&ch->tsk_mgmt_done);
1958 		} else {
1959 			shost_printk(KERN_ERR, target->scsi_host,
1960 				     "Received tsk mgmt response too late for tag %#llx\n",
1961 				     rsp->tag);
1962 		}
1963 		spin_unlock_irqrestore(&ch->lock, flags);
1964 	} else {
1965 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1966 		if (scmnd && scmnd->host_scribble) {
1967 			req = (void *)scmnd->host_scribble;
1968 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1969 		} else {
1970 			scmnd = NULL;
1971 		}
1972 		if (!scmnd) {
1973 			shost_printk(KERN_ERR, target->scsi_host,
1974 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1975 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1976 
1977 			spin_lock_irqsave(&ch->lock, flags);
1978 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1979 			spin_unlock_irqrestore(&ch->lock, flags);
1980 
1981 			return;
1982 		}
1983 		scmnd->result = rsp->status;
1984 
1985 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1986 			memcpy(scmnd->sense_buffer, rsp->data +
1987 			       be32_to_cpu(rsp->resp_data_len),
1988 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1989 				     SCSI_SENSE_BUFFERSIZE));
1990 		}
1991 
1992 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1993 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1994 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1995 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1996 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1997 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1998 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1999 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
2000 
2001 		srp_free_req(ch, req, scmnd,
2002 			     be32_to_cpu(rsp->req_lim_delta));
2003 
2004 		scmnd->host_scribble = NULL;
2005 		scmnd->scsi_done(scmnd);
2006 	}
2007 }
2008 
2009 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
2010 			       void *rsp, int len)
2011 {
2012 	struct srp_target_port *target = ch->target;
2013 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2014 	unsigned long flags;
2015 	struct srp_iu *iu;
2016 	int err;
2017 
2018 	spin_lock_irqsave(&ch->lock, flags);
2019 	ch->req_lim += req_delta;
2020 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2021 	spin_unlock_irqrestore(&ch->lock, flags);
2022 
2023 	if (!iu) {
2024 		shost_printk(KERN_ERR, target->scsi_host, PFX
2025 			     "no IU available to send response\n");
2026 		return 1;
2027 	}
2028 
2029 	iu->num_sge = 1;
2030 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2031 	memcpy(iu->buf, rsp, len);
2032 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2033 
2034 	err = srp_post_send(ch, iu, len);
2035 	if (err) {
2036 		shost_printk(KERN_ERR, target->scsi_host, PFX
2037 			     "unable to post response: %d\n", err);
2038 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2039 	}
2040 
2041 	return err;
2042 }
2043 
2044 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2045 				 struct srp_cred_req *req)
2046 {
2047 	struct srp_cred_rsp rsp = {
2048 		.opcode = SRP_CRED_RSP,
2049 		.tag = req->tag,
2050 	};
2051 	s32 delta = be32_to_cpu(req->req_lim_delta);
2052 
2053 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2054 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2055 			     "problems processing SRP_CRED_REQ\n");
2056 }
2057 
2058 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2059 				struct srp_aer_req *req)
2060 {
2061 	struct srp_target_port *target = ch->target;
2062 	struct srp_aer_rsp rsp = {
2063 		.opcode = SRP_AER_RSP,
2064 		.tag = req->tag,
2065 	};
2066 	s32 delta = be32_to_cpu(req->req_lim_delta);
2067 
2068 	shost_printk(KERN_ERR, target->scsi_host, PFX
2069 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2070 
2071 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2072 		shost_printk(KERN_ERR, target->scsi_host, PFX
2073 			     "problems processing SRP_AER_REQ\n");
2074 }
2075 
2076 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2077 {
2078 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2079 	struct srp_rdma_ch *ch = cq->cq_context;
2080 	struct srp_target_port *target = ch->target;
2081 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2082 	int res;
2083 	u8 opcode;
2084 
2085 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
2086 		srp_handle_qp_err(cq, wc, "RECV");
2087 		return;
2088 	}
2089 
2090 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2091 				   DMA_FROM_DEVICE);
2092 
2093 	opcode = *(u8 *) iu->buf;
2094 
2095 	if (0) {
2096 		shost_printk(KERN_ERR, target->scsi_host,
2097 			     PFX "recv completion, opcode 0x%02x\n", opcode);
2098 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2099 			       iu->buf, wc->byte_len, true);
2100 	}
2101 
2102 	switch (opcode) {
2103 	case SRP_RSP:
2104 		srp_process_rsp(ch, iu->buf);
2105 		break;
2106 
2107 	case SRP_CRED_REQ:
2108 		srp_process_cred_req(ch, iu->buf);
2109 		break;
2110 
2111 	case SRP_AER_REQ:
2112 		srp_process_aer_req(ch, iu->buf);
2113 		break;
2114 
2115 	case SRP_T_LOGOUT:
2116 		/* XXX Handle target logout */
2117 		shost_printk(KERN_WARNING, target->scsi_host,
2118 			     PFX "Got target logout request\n");
2119 		break;
2120 
2121 	default:
2122 		shost_printk(KERN_WARNING, target->scsi_host,
2123 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2124 		break;
2125 	}
2126 
2127 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2128 				      DMA_FROM_DEVICE);
2129 
2130 	res = srp_post_recv(ch, iu);
2131 	if (res != 0)
2132 		shost_printk(KERN_ERR, target->scsi_host,
2133 			     PFX "Recv failed with error code %d\n", res);
2134 }
2135 
2136 /**
2137  * srp_tl_err_work() - handle a transport layer error
2138  * @work: Work structure embedded in an SRP target port.
2139  *
2140  * Note: This function may get invoked before the rport has been created,
2141  * hence the target->rport test.
2142  */
2143 static void srp_tl_err_work(struct work_struct *work)
2144 {
2145 	struct srp_target_port *target;
2146 
2147 	target = container_of(work, struct srp_target_port, tl_err_work);
2148 	if (target->rport)
2149 		srp_start_tl_fail_timers(target->rport);
2150 }
2151 
2152 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2153 		const char *opname)
2154 {
2155 	struct srp_rdma_ch *ch = cq->cq_context;
2156 	struct srp_target_port *target = ch->target;
2157 
2158 	if (ch->connected && !target->qp_in_error) {
2159 		shost_printk(KERN_ERR, target->scsi_host,
2160 			     PFX "failed %s status %s (%d) for CQE %p\n",
2161 			     opname, ib_wc_status_msg(wc->status), wc->status,
2162 			     wc->wr_cqe);
2163 		queue_work(system_long_wq, &target->tl_err_work);
2164 	}
2165 	target->qp_in_error = true;
2166 }
2167 
2168 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2169 {
2170 	struct srp_target_port *target = host_to_target(shost);
2171 	struct srp_rdma_ch *ch;
2172 	struct srp_request *req;
2173 	struct srp_iu *iu;
2174 	struct srp_cmd *cmd;
2175 	struct ib_device *dev;
2176 	unsigned long flags;
2177 	u32 tag;
2178 	u16 idx;
2179 	int len, ret;
2180 
2181 	scmnd->result = srp_chkready(target->rport);
2182 	if (unlikely(scmnd->result))
2183 		goto err;
2184 
2185 	WARN_ON_ONCE(scmnd->request->tag < 0);
2186 	tag = blk_mq_unique_tag(scmnd->request);
2187 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2188 	idx = blk_mq_unique_tag_to_tag(tag);
2189 	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2190 		  dev_name(&shost->shost_gendev), tag, idx,
2191 		  target->req_ring_size);
2192 
2193 	spin_lock_irqsave(&ch->lock, flags);
2194 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2195 	spin_unlock_irqrestore(&ch->lock, flags);
2196 
2197 	if (!iu)
2198 		goto err;
2199 
2200 	req = &ch->req_ring[idx];
2201 	dev = target->srp_host->srp_dev->dev;
2202 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2203 				   DMA_TO_DEVICE);
2204 
2205 	scmnd->host_scribble = (void *) req;
2206 
2207 	cmd = iu->buf;
2208 	memset(cmd, 0, sizeof *cmd);
2209 
2210 	cmd->opcode = SRP_CMD;
2211 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
2212 	cmd->tag    = tag;
2213 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2214 	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2215 		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2216 					    4);
2217 		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2218 			goto err_iu;
2219 	}
2220 
2221 	req->scmnd    = scmnd;
2222 	req->cmd      = iu;
2223 
2224 	len = srp_map_data(scmnd, ch, req);
2225 	if (len < 0) {
2226 		shost_printk(KERN_ERR, target->scsi_host,
2227 			     PFX "Failed to map data (%d)\n", len);
2228 		/*
2229 		 * If we ran out of memory descriptors (-ENOMEM) because an
2230 		 * application is queuing many requests with more than
2231 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2232 		 * to reduce queue depth temporarily.
2233 		 */
2234 		scmnd->result = len == -ENOMEM ?
2235 			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2236 		goto err_iu;
2237 	}
2238 
2239 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2240 				      DMA_TO_DEVICE);
2241 
2242 	if (srp_post_send(ch, iu, len)) {
2243 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2244 		scmnd->result = DID_ERROR << 16;
2245 		goto err_unmap;
2246 	}
2247 
2248 	return 0;
2249 
2250 err_unmap:
2251 	srp_unmap_data(scmnd, ch, req);
2252 
2253 err_iu:
2254 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2255 
2256 	/*
2257 	 * Make sure that the loops that iterate over the request ring
2258 	 * cannot encounter a dangling SCSI command pointer.
2259 	 */
2260 	req->scmnd = NULL;
2261 
2262 err:
2263 	if (scmnd->result) {
2264 		scmnd->scsi_done(scmnd);
2265 		ret = 0;
2266 	} else {
2267 		ret = SCSI_MLQUEUE_HOST_BUSY;
2268 	}
2269 
2270 	return ret;
2271 }
2272 
2273 /*
2274  * Note: the resources allocated in this function are freed in
2275  * srp_free_ch_ib().
2276  */
2277 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2278 {
2279 	struct srp_target_port *target = ch->target;
2280 	int i;
2281 
2282 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2283 			      GFP_KERNEL);
2284 	if (!ch->rx_ring)
2285 		goto err_no_ring;
2286 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2287 			      GFP_KERNEL);
2288 	if (!ch->tx_ring)
2289 		goto err_no_ring;
2290 
2291 	for (i = 0; i < target->queue_size; ++i) {
2292 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2293 					      ch->max_ti_iu_len,
2294 					      GFP_KERNEL, DMA_FROM_DEVICE);
2295 		if (!ch->rx_ring[i])
2296 			goto err;
2297 	}
2298 
2299 	for (i = 0; i < target->queue_size; ++i) {
2300 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2301 					      ch->max_it_iu_len,
2302 					      GFP_KERNEL, DMA_TO_DEVICE);
2303 		if (!ch->tx_ring[i])
2304 			goto err;
2305 
2306 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2307 	}
2308 
2309 	return 0;
2310 
2311 err:
2312 	for (i = 0; i < target->queue_size; ++i) {
2313 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2314 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2315 	}
2316 
2318 err_no_ring:
2319 	kfree(ch->tx_ring);
2320 	ch->tx_ring = NULL;
2321 	kfree(ch->rx_ring);
2322 	ch->rx_ring = NULL;
2323 
2324 	return -ENOMEM;
2325 }
2326 
2327 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2328 {
2329 	uint64_t T_tr_ns, max_compl_time_ms;
2330 	uint32_t rq_tmo_jiffies;
2331 
2332 	/*
2333 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2334 	 * table 91), both the QP timeout and the retry count have to be set
2335 	 * for RC QP's during the RTR to RTS transition.
2336 	 */
2337 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2338 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2339 
2340 	/*
2341 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2342 	 * it can take before an error completion is generated. See also
2343 	 * C9-140..142 in the IBTA spec for more information about how to
2344 	 * convert the QP Local ACK Timeout value to nanoseconds.
2345 	 */
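	/*
	 * Worked example with illustrative values: for timeout == 14 and
	 * retry_cnt == 7, T_tr = 4096 ns * 2^14 ~= 67 ms, the largest
	 * completion delay is about 7 * 4 * 67 ms ~= 1.9 s and the
	 * resulting rq_tmo_jiffies corresponds to roughly 2.9 seconds.
	 */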
2346 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2347 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2348 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2349 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2350 
2351 	return rq_tmo_jiffies;
2352 }
2353 
2354 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2355 			       const struct srp_login_rsp *lrsp,
2356 			       struct srp_rdma_ch *ch)
2357 {
2358 	struct srp_target_port *target = ch->target;
2359 	struct ib_qp_attr *qp_attr = NULL;
2360 	int attr_mask = 0;
2361 	int ret = 0;
2362 	int i;
2363 
2364 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2365 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2366 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2367 		ch->use_imm_data  = srp_use_imm_data &&
2368 			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2369 		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2370 						      ch->use_imm_data,
2371 						      target->max_it_iu_size);
2372 		WARN_ON_ONCE(ch->max_it_iu_len >
2373 			     be32_to_cpu(lrsp->max_it_iu_len));
2374 
2375 		if (ch->use_imm_data)
2376 			shost_printk(KERN_DEBUG, target->scsi_host,
2377 				     PFX "using immediate data\n");
2378 
2379 		/*
2380 		 * Reserve credits for task management so we don't
2381 		 * bounce requests back to the SCSI mid-layer.
2382 		 */
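		/*
		 * E.g. (illustrative numbers) a req_lim of 64 with
		 * SRP_TSK_MGMT_SQ_SIZE == 1 caps can_queue at 63, unless
		 * it was already lower.
		 */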
2383 		target->scsi_host->can_queue
2384 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2385 			      target->scsi_host->can_queue);
2386 		target->scsi_host->cmd_per_lun
2387 			= min_t(int, target->scsi_host->can_queue,
2388 				target->scsi_host->cmd_per_lun);
2389 	} else {
2390 		shost_printk(KERN_WARNING, target->scsi_host,
2391 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2392 		ret = -ECONNRESET;
2393 		goto error;
2394 	}
2395 
2396 	if (!ch->rx_ring) {
2397 		ret = srp_alloc_iu_bufs(ch);
2398 		if (ret)
2399 			goto error;
2400 	}
2401 
2402 	for (i = 0; i < target->queue_size; i++) {
2403 		struct srp_iu *iu = ch->rx_ring[i];
2404 
2405 		ret = srp_post_recv(ch, iu);
2406 		if (ret)
2407 			goto error;
2408 	}
2409 
2410 	if (!target->using_rdma_cm) {
2411 		ret = -ENOMEM;
2412 		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2413 		if (!qp_attr)
2414 			goto error;
2415 
2416 		qp_attr->qp_state = IB_QPS_RTR;
2417 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2418 		if (ret)
2419 			goto error_free;
2420 
2421 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2422 		if (ret)
2423 			goto error_free;
2424 
2425 		qp_attr->qp_state = IB_QPS_RTS;
2426 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2427 		if (ret)
2428 			goto error_free;
2429 
2430 		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2431 
2432 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2433 		if (ret)
2434 			goto error_free;
2435 
2436 		ret = ib_send_cm_rtu(cm_id, NULL, 0);
2437 	}
2438 
2439 error_free:
2440 	kfree(qp_attr);
2441 
2442 error:
2443 	ch->status = ret;
2444 }
2445 
2446 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2447 				  const struct ib_cm_event *event,
2448 				  struct srp_rdma_ch *ch)
2449 {
2450 	struct srp_target_port *target = ch->target;
2451 	struct Scsi_Host *shost = target->scsi_host;
2452 	struct ib_class_port_info *cpi;
2453 	int opcode;
2454 	u16 dlid;
2455 
2456 	switch (event->param.rej_rcvd.reason) {
2457 	case IB_CM_REJ_PORT_CM_REDIRECT:
2458 		cpi = event->param.rej_rcvd.ari;
2459 		dlid = be16_to_cpu(cpi->redirect_lid);
2460 		sa_path_set_dlid(&ch->ib_cm.path, dlid);
2461 		ch->ib_cm.path.pkey = cpi->redirect_pkey;
2462 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2463 		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2464 
2465 		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2466 		break;
2467 
2468 	case IB_CM_REJ_PORT_REDIRECT:
2469 		if (srp_target_is_topspin(target)) {
2470 			union ib_gid *dgid = &ch->ib_cm.path.dgid;
2471 
2472 			/*
2473 			 * Topspin/Cisco SRP gateways incorrectly send
2474 			 * reject reason code 25 when they mean 24
2475 			 * (port redirect).
2476 			 */
2477 			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2478 
2479 			shost_printk(KERN_DEBUG, shost,
2480 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2481 				     be64_to_cpu(dgid->global.subnet_prefix),
2482 				     be64_to_cpu(dgid->global.interface_id));
2483 
2484 			ch->status = SRP_PORT_REDIRECT;
2485 		} else {
2486 			shost_printk(KERN_WARNING, shost,
2487 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2488 			ch->status = -ECONNRESET;
2489 		}
2490 		break;
2491 
2492 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2493 		shost_printk(KERN_WARNING, shost,
2494 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2495 		ch->status = -ECONNRESET;
2496 		break;
2497 
2498 	case IB_CM_REJ_CONSUMER_DEFINED:
2499 		opcode = *(u8 *) event->private_data;
2500 		if (opcode == SRP_LOGIN_REJ) {
2501 			struct srp_login_rej *rej = event->private_data;
2502 			u32 reason = be32_to_cpu(rej->reason);
2503 
2504 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2505 				shost_printk(KERN_WARNING, shost,
2506 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2507 			else
2508 				shost_printk(KERN_WARNING, shost, PFX
2509 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2510 					     target->sgid.raw,
2511 					     target->ib_cm.orig_dgid.raw,
2512 					     reason);
2513 		} else
2514 			shost_printk(KERN_WARNING, shost,
2515 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2516 				     opcode);
2517 		ch->status = -ECONNRESET;
2518 		break;
2519 
2520 	case IB_CM_REJ_STALE_CONN:
2521 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2522 		ch->status = SRP_STALE_CONN;
2523 		break;
2524 
2525 	default:
2526 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2527 			     event->param.rej_rcvd.reason);
2528 		ch->status = -ECONNRESET;
2529 	}
2530 }
2531 
2532 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2533 			     const struct ib_cm_event *event)
2534 {
2535 	struct srp_rdma_ch *ch = cm_id->context;
2536 	struct srp_target_port *target = ch->target;
2537 	int comp = 0;
2538 
2539 	switch (event->event) {
2540 	case IB_CM_REQ_ERROR:
2541 		shost_printk(KERN_DEBUG, target->scsi_host,
2542 			     PFX "Sending CM REQ failed\n");
2543 		comp = 1;
2544 		ch->status = -ECONNRESET;
2545 		break;
2546 
2547 	case IB_CM_REP_RECEIVED:
2548 		comp = 1;
2549 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2550 		break;
2551 
2552 	case IB_CM_REJ_RECEIVED:
2553 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2554 		comp = 1;
2555 
2556 		srp_ib_cm_rej_handler(cm_id, event, ch);
2557 		break;
2558 
2559 	case IB_CM_DREQ_RECEIVED:
2560 		shost_printk(KERN_WARNING, target->scsi_host,
2561 			     PFX "DREQ received - connection closed\n");
2562 		ch->connected = false;
2563 		if (ib_send_cm_drep(cm_id, NULL, 0))
2564 			shost_printk(KERN_ERR, target->scsi_host,
2565 				     PFX "Sending CM DREP failed\n");
2566 		queue_work(system_long_wq, &target->tl_err_work);
2567 		break;
2568 
2569 	case IB_CM_TIMEWAIT_EXIT:
2570 		shost_printk(KERN_ERR, target->scsi_host,
2571 			     PFX "connection closed\n");
2572 		comp = 1;
2573 
2574 		ch->status = 0;
2575 		break;
2576 
2577 	case IB_CM_MRA_RECEIVED:
2578 	case IB_CM_DREQ_ERROR:
2579 	case IB_CM_DREP_RECEIVED:
2580 		break;
2581 
2582 	default:
2583 		shost_printk(KERN_WARNING, target->scsi_host,
2584 			     PFX "Unhandled CM event %d\n", event->event);
2585 		break;
2586 	}
2587 
2588 	if (comp)
2589 		complete(&ch->done);
2590 
2591 	return 0;
2592 }
2593 
2594 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2595 				    struct rdma_cm_event *event)
2596 {
2597 	struct srp_target_port *target = ch->target;
2598 	struct Scsi_Host *shost = target->scsi_host;
2599 	int opcode;
2600 
2601 	switch (event->status) {
2602 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2603 		shost_printk(KERN_WARNING, shost,
2604 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2605 		ch->status = -ECONNRESET;
2606 		break;
2607 
2608 	case IB_CM_REJ_CONSUMER_DEFINED:
2609 		opcode = *(u8 *) event->param.conn.private_data;
2610 		if (opcode == SRP_LOGIN_REJ) {
2611 			struct srp_login_rej *rej =
2612 				(struct srp_login_rej *)
2613 				event->param.conn.private_data;
2614 			u32 reason = be32_to_cpu(rej->reason);
2615 
2616 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2617 				shost_printk(KERN_WARNING, shost,
2618 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2619 			else
2620 				shost_printk(KERN_WARNING, shost,
2621 					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2622 		} else {
2623 			shost_printk(KERN_WARNING, shost,
2624 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2625 				     opcode);
2626 		}
2627 		ch->status = -ECONNRESET;
2628 		break;
2629 
2630 	case IB_CM_REJ_STALE_CONN:
2631 		shost_printk(KERN_WARNING, shost,
2632 			     "  REJ reason: stale connection\n");
2633 		ch->status = SRP_STALE_CONN;
2634 		break;
2635 
2636 	default:
2637 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2638 			     event->status);
2639 		ch->status = -ECONNRESET;
2640 		break;
2641 	}
2642 }
2643 
2644 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2645 			       struct rdma_cm_event *event)
2646 {
2647 	struct srp_rdma_ch *ch = cm_id->context;
2648 	struct srp_target_port *target = ch->target;
2649 	int comp = 0;
2650 
2651 	switch (event->event) {
2652 	case RDMA_CM_EVENT_ADDR_RESOLVED:
2653 		ch->status = 0;
2654 		comp = 1;
2655 		break;
2656 
2657 	case RDMA_CM_EVENT_ADDR_ERROR:
2658 		ch->status = -ENXIO;
2659 		comp = 1;
2660 		break;
2661 
2662 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
2663 		ch->status = 0;
2664 		comp = 1;
2665 		break;
2666 
2667 	case RDMA_CM_EVENT_ROUTE_ERROR:
2668 	case RDMA_CM_EVENT_UNREACHABLE:
2669 		ch->status = -EHOSTUNREACH;
2670 		comp = 1;
2671 		break;
2672 
2673 	case RDMA_CM_EVENT_CONNECT_ERROR:
2674 		shost_printk(KERN_DEBUG, target->scsi_host,
2675 			     PFX "Sending CM REQ failed\n");
2676 		comp = 1;
2677 		ch->status = -ECONNRESET;
2678 		break;
2679 
2680 	case RDMA_CM_EVENT_ESTABLISHED:
2681 		comp = 1;
2682 		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2683 		break;
2684 
2685 	case RDMA_CM_EVENT_REJECTED:
2686 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2687 		comp = 1;
2688 
2689 		srp_rdma_cm_rej_handler(ch, event);
2690 		break;
2691 
2692 	case RDMA_CM_EVENT_DISCONNECTED:
2693 		if (ch->connected) {
2694 			shost_printk(KERN_WARNING, target->scsi_host,
2695 				     PFX "received DREQ\n");
2696 			rdma_disconnect(ch->rdma_cm.cm_id);
2697 			comp = 1;
2698 			ch->status = 0;
2699 			queue_work(system_long_wq, &target->tl_err_work);
2700 		}
2701 		break;
2702 
2703 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2704 		shost_printk(KERN_ERR, target->scsi_host,
2705 			     PFX "connection closed\n");
2706 
2707 		comp = 1;
2708 		ch->status = 0;
2709 		break;
2710 
2711 	default:
2712 		shost_printk(KERN_WARNING, target->scsi_host,
2713 			     PFX "Unhandled CM event %d\n", event->event);
2714 		break;
2715 	}
2716 
2717 	if (comp)
2718 		complete(&ch->done);
2719 
2720 	return 0;
2721 }
2722 
2723 /**
2724  * srp_change_queue_depth - set device queue depth
2725  * @sdev: scsi device struct
2726  * @qdepth: requested queue depth
2727  *
2728  * Returns queue depth.
2729  */
2730 static int
2731 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2732 {
2733 	if (!sdev->tagged_supported)
2734 		qdepth = 1;
2735 	return scsi_change_queue_depth(sdev, qdepth);
2736 }
2737 
2738 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2739 			     u8 func, u8 *status)
2740 {
2741 	struct srp_target_port *target = ch->target;
2742 	struct srp_rport *rport = target->rport;
2743 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2744 	struct srp_iu *iu;
2745 	struct srp_tsk_mgmt *tsk_mgmt;
2746 	int res;
2747 
2748 	if (!ch->connected || target->qp_in_error)
2749 		return -1;
2750 
2751 	/*
2752 	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2753 	 * invoked while a task management function is being sent.
2754 	 */
2755 	mutex_lock(&rport->mutex);
2756 	spin_lock_irq(&ch->lock);
2757 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2758 	spin_unlock_irq(&ch->lock);
2759 
2760 	if (!iu) {
2761 		mutex_unlock(&rport->mutex);
2762 
2763 		return -1;
2764 	}
2765 
2766 	iu->num_sge = 1;
2767 
2768 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2769 				   DMA_TO_DEVICE);
2770 	tsk_mgmt = iu->buf;
2771 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2772 
2773 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2774 	int_to_scsilun(lun, &tsk_mgmt->lun);
2775 	tsk_mgmt->tsk_mgmt_func = func;
2776 	tsk_mgmt->task_tag	= req_tag;
2777 
2778 	spin_lock_irq(&ch->lock);
2779 	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2780 	tsk_mgmt->tag = ch->tsk_mgmt_tag;
2781 	spin_unlock_irq(&ch->lock);
2782 
2783 	init_completion(&ch->tsk_mgmt_done);
2784 
2785 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2786 				      DMA_TO_DEVICE);
2787 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2788 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2789 		mutex_unlock(&rport->mutex);
2790 
2791 		return -1;
2792 	}
2793 	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2794 					msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2795 	if (res > 0 && status)
2796 		*status = ch->tsk_mgmt_status;
2797 	mutex_unlock(&rport->mutex);
2798 
2799 	WARN_ON_ONCE(res < 0);
2800 
2801 	return res > 0 ? 0 : -1;
2802 }
2803 
2804 static int srp_abort(struct scsi_cmnd *scmnd)
2805 {
2806 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2807 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2808 	u32 tag;
2809 	u16 ch_idx;
2810 	struct srp_rdma_ch *ch;
2811 	int ret;
2812 
2813 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2814 
2815 	if (!req)
2816 		return SUCCESS;
2817 	tag = blk_mq_unique_tag(scmnd->request);
2818 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2819 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2820 		return SUCCESS;
2821 	ch = &target->ch[ch_idx];
2822 	if (!srp_claim_req(ch, req, NULL, scmnd))
2823 		return SUCCESS;
2824 	shost_printk(KERN_ERR, target->scsi_host,
2825 		     "Sending SRP abort for tag %#x\n", tag);
2826 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2827 			      SRP_TSK_ABORT_TASK, NULL) == 0)
2828 		ret = SUCCESS;
2829 	else if (target->rport->state == SRP_RPORT_LOST)
2830 		ret = FAST_IO_FAIL;
2831 	else
2832 		ret = FAILED;
2833 	if (ret == SUCCESS) {
2834 		srp_free_req(ch, req, scmnd, 0);
2835 		scmnd->result = DID_ABORT << 16;
2836 		scmnd->scsi_done(scmnd);
2837 	}
2838 
2839 	return ret;
2840 }
2841 
2842 static int srp_reset_device(struct scsi_cmnd *scmnd)
2843 {
2844 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2845 	struct srp_rdma_ch *ch;
2846 	u8 status;
2847 
2848 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2849 
2850 	ch = &target->ch[0];
2851 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2852 			      SRP_TSK_LUN_RESET, &status))
2853 		return FAILED;
2854 	if (status)
2855 		return FAILED;
2856 
2857 	return SUCCESS;
2858 }
2859 
2860 static int srp_reset_host(struct scsi_cmnd *scmnd)
2861 {
2862 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2863 
2864 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2865 
2866 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2867 }
2868 
2869 static int srp_target_alloc(struct scsi_target *starget)
2870 {
2871 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2872 	struct srp_target_port *target = host_to_target(shost);
2873 
2874 	if (target->target_can_queue)
2875 		starget->can_queue = target->target_can_queue;
2876 	return 0;
2877 }
2878 
2879 static int srp_slave_configure(struct scsi_device *sdev)
2880 {
2881 	struct Scsi_Host *shost = sdev->host;
2882 	struct srp_target_port *target = host_to_target(shost);
2883 	struct request_queue *q = sdev->request_queue;
2884 	unsigned long timeout;
2885 
2886 	if (sdev->type == TYPE_DISK) {
2887 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2888 		blk_queue_rq_timeout(q, timeout);
2889 	}
2890 
2891 	return 0;
2892 }
2893 
2894 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2895 			   char *buf)
2896 {
2897 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2898 
2899 	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2900 }
2901 
2902 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2903 			     char *buf)
2904 {
2905 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2906 
2907 	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2908 }
2909 
2910 static ssize_t show_service_id(struct device *dev,
2911 			       struct device_attribute *attr, char *buf)
2912 {
2913 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2914 
2915 	if (target->using_rdma_cm)
2916 		return -ENOENT;
2917 	return sysfs_emit(buf, "0x%016llx\n",
2918 			  be64_to_cpu(target->ib_cm.service_id));
2919 }
2920 
2921 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2922 			 char *buf)
2923 {
2924 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2925 
2926 	if (target->using_rdma_cm)
2927 		return -ENOENT;
2928 
2929 	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2930 }
2931 
2932 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2933 			 char *buf)
2934 {
2935 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2936 
2937 	return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
2938 }
2939 
2940 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2941 			 char *buf)
2942 {
2943 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2944 	struct srp_rdma_ch *ch = &target->ch[0];
2945 
2946 	if (target->using_rdma_cm)
2947 		return -ENOENT;
2948 
2949 	return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2950 }
2951 
2952 static ssize_t show_orig_dgid(struct device *dev,
2953 			      struct device_attribute *attr, char *buf)
2954 {
2955 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2956 
2957 	if (target->using_rdma_cm)
2958 		return -ENOENT;
2959 
2960 	return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2961 }
2962 
2963 static ssize_t show_req_lim(struct device *dev,
2964 			    struct device_attribute *attr, char *buf)
2965 {
2966 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2967 	struct srp_rdma_ch *ch;
2968 	int i, req_lim = INT_MAX;
2969 
2970 	for (i = 0; i < target->ch_count; i++) {
2971 		ch = &target->ch[i];
2972 		req_lim = min(req_lim, ch->req_lim);
2973 	}
2974 
2975 	return sysfs_emit(buf, "%d\n", req_lim);
2976 }
2977 
2978 static ssize_t show_zero_req_lim(struct device *dev,
2979 				 struct device_attribute *attr, char *buf)
2980 {
2981 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2982 
2983 	return sysfs_emit(buf, "%d\n", target->zero_req_lim);
2984 }
2985 
2986 static ssize_t show_local_ib_port(struct device *dev,
2987 				  struct device_attribute *attr, char *buf)
2988 {
2989 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2990 
2991 	return sysfs_emit(buf, "%d\n", target->srp_host->port);
2992 }
2993 
2994 static ssize_t show_local_ib_device(struct device *dev,
2995 				    struct device_attribute *attr, char *buf)
2996 {
2997 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2998 
2999 	return sysfs_emit(buf, "%s\n",
3000 			  dev_name(&target->srp_host->srp_dev->dev->dev));
3001 }
3002 
3003 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
3004 			     char *buf)
3005 {
3006 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3007 
3008 	return sysfs_emit(buf, "%d\n", target->ch_count);
3009 }
3010 
3011 static ssize_t show_comp_vector(struct device *dev,
3012 				struct device_attribute *attr, char *buf)
3013 {
3014 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3015 
3016 	return sysfs_emit(buf, "%d\n", target->comp_vector);
3017 }
3018 
3019 static ssize_t show_tl_retry_count(struct device *dev,
3020 				   struct device_attribute *attr, char *buf)
3021 {
3022 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3023 
3024 	return sysfs_emit(buf, "%d\n", target->tl_retry_count);
3025 }
3026 
3027 static ssize_t show_cmd_sg_entries(struct device *dev,
3028 				   struct device_attribute *attr, char *buf)
3029 {
3030 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3031 
3032 	return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
3033 }
3034 
3035 static ssize_t show_allow_ext_sg(struct device *dev,
3036 				 struct device_attribute *attr, char *buf)
3037 {
3038 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3039 
3040 	return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3041 }
3042 
3043 static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
3044 static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
3045 static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
3046 static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
3047 static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
3048 static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
3049 static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
3050 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
3051 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
3052 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
3053 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
3054 static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
3055 static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
3056 static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
3057 static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
3058 static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
3059 
3060 static struct device_attribute *srp_host_attrs[] = {
3061 	&dev_attr_id_ext,
3062 	&dev_attr_ioc_guid,
3063 	&dev_attr_service_id,
3064 	&dev_attr_pkey,
3065 	&dev_attr_sgid,
3066 	&dev_attr_dgid,
3067 	&dev_attr_orig_dgid,
3068 	&dev_attr_req_lim,
3069 	&dev_attr_zero_req_lim,
3070 	&dev_attr_local_ib_port,
3071 	&dev_attr_local_ib_device,
3072 	&dev_attr_ch_count,
3073 	&dev_attr_comp_vector,
3074 	&dev_attr_tl_retry_count,
3075 	&dev_attr_cmd_sg_entries,
3076 	&dev_attr_allow_ext_sg,
3077 	NULL
3078 };
3079 
3080 static struct scsi_host_template srp_template = {
3081 	.module				= THIS_MODULE,
3082 	.name				= "InfiniBand SRP initiator",
3083 	.proc_name			= DRV_NAME,
3084 	.target_alloc			= srp_target_alloc,
3085 	.slave_configure		= srp_slave_configure,
3086 	.info				= srp_target_info,
3087 	.queuecommand			= srp_queuecommand,
3088 	.change_queue_depth             = srp_change_queue_depth,
3089 	.eh_timed_out			= srp_timed_out,
3090 	.eh_abort_handler		= srp_abort,
3091 	.eh_device_reset_handler	= srp_reset_device,
3092 	.eh_host_reset_handler		= srp_reset_host,
3093 	.skip_settle_delay		= true,
3094 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
3095 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
3096 	.this_id			= -1,
3097 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
3098 	.shost_attrs			= srp_host_attrs,
3099 	.track_queue_depth		= 1,
3100 };
3101 
3102 static int srp_sdev_count(struct Scsi_Host *host)
3103 {
3104 	struct scsi_device *sdev;
3105 	int c = 0;
3106 
3107 	shost_for_each_device(sdev, host)
3108 		c++;
3109 
3110 	return c;
3111 }
3112 
3113 /*
3114  * Return values:
3115  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3116  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3117  *    removal has been scheduled.
3118  * 0 and target->state != SRP_TARGET_REMOVED upon success.
3119  */
3120 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3121 {
3122 	struct srp_rport_identifiers ids;
3123 	struct srp_rport *rport;
3124 
3125 	target->state = SRP_TARGET_SCANNING;
3126 	sprintf(target->target_name, "SRP.T10:%016llX",
3127 		be64_to_cpu(target->id_ext));
3128 
3129 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3130 		return -ENODEV;
3131 
3132 	memcpy(ids.port_id, &target->id_ext, 8);
3133 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3134 	ids.roles = SRP_RPORT_ROLE_TARGET;
3135 	rport = srp_rport_add(target->scsi_host, &ids);
3136 	if (IS_ERR(rport)) {
3137 		scsi_remove_host(target->scsi_host);
3138 		return PTR_ERR(rport);
3139 	}
3140 
3141 	rport->lld_data = target;
3142 	target->rport = rport;
3143 
3144 	spin_lock(&host->target_lock);
3145 	list_add_tail(&target->list, &host->target_list);
3146 	spin_unlock(&host->target_lock);
3147 
3148 	scsi_scan_target(&target->scsi_host->shost_gendev,
3149 			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3150 
3151 	if (srp_connected_ch(target) < target->ch_count ||
3152 	    target->qp_in_error) {
3153 		shost_printk(KERN_INFO, target->scsi_host,
3154 			     PFX "SCSI scan failed - removing SCSI host\n");
3155 		srp_queue_remove_work(target);
3156 		goto out;
3157 	}
3158 
3159 	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3160 		 dev_name(&target->scsi_host->shost_gendev),
3161 		 srp_sdev_count(target->scsi_host));
3162 
3163 	spin_lock_irq(&target->lock);
3164 	if (target->state == SRP_TARGET_SCANNING)
3165 		target->state = SRP_TARGET_LIVE;
3166 	spin_unlock_irq(&target->lock);
3167 
3168 out:
3169 	return 0;
3170 }
3171 
3172 static void srp_release_dev(struct device *dev)
3173 {
3174 	struct srp_host *host =
3175 		container_of(dev, struct srp_host, dev);
3176 
3177 	complete(&host->released);
3178 }
3179 
3180 static struct class srp_class = {
3181 	.name    = "infiniband_srp",
3182 	.dev_release = srp_release_dev
3183 };
3184 
3185 /**
3186  * srp_conn_unique() - check whether the connection to a target is unique
3187  * @host:   SRP host.
3188  * @target: SRP target port.
3189  */
3190 static bool srp_conn_unique(struct srp_host *host,
3191 			    struct srp_target_port *target)
3192 {
3193 	struct srp_target_port *t;
3194 	bool ret = false;
3195 
3196 	if (target->state == SRP_TARGET_REMOVED)
3197 		goto out;
3198 
3199 	ret = true;
3200 
3201 	spin_lock(&host->target_lock);
3202 	list_for_each_entry(t, &host->target_list, list) {
3203 		if (t != target &&
3204 		    target->id_ext == t->id_ext &&
3205 		    target->ioc_guid == t->ioc_guid &&
3206 		    target->initiator_ext == t->initiator_ext) {
3207 			ret = false;
3208 			break;
3209 		}
3210 	}
3211 	spin_unlock(&host->target_lock);
3212 
3213 out:
3214 	return ret;
3215 }
3216 
3217 /*
3218  * Target ports are added by writing
3219  *
3220  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3221  *     pkey=<P_Key>,service_id=<service ID>
3222  * or
3223  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3224  *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3225  *
3226  * to the add_target sysfs attribute.
3227  */
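/*
 * Example (the GUIDs, GID, IP address, port number and HCA name below are
 * illustrative only; the sysfs path contains the local HCA name and port):
 *
 *     echo id_ext=0x200500a0b8204d00,ioc_guid=0x0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0x0002c90200402bd4 > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target
 *
 * or, for a login through the RDMA/CM:
 *
 *     echo id_ext=0x200500a0b8204d00,ioc_guid=0x0002c90200402bd4,dest=192.168.122.76:5555 > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target
 */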
3228 enum {
3229 	SRP_OPT_ERR		= 0,
3230 	SRP_OPT_ID_EXT		= 1 << 0,
3231 	SRP_OPT_IOC_GUID	= 1 << 1,
3232 	SRP_OPT_DGID		= 1 << 2,
3233 	SRP_OPT_PKEY		= 1 << 3,
3234 	SRP_OPT_SERVICE_ID	= 1 << 4,
3235 	SRP_OPT_MAX_SECT	= 1 << 5,
3236 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
3237 	SRP_OPT_IO_CLASS	= 1 << 7,
3238 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
3239 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
3240 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
3241 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
3242 	SRP_OPT_COMP_VECTOR	= 1 << 12,
3243 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
3244 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
3245 	SRP_OPT_IP_SRC		= 1 << 15,
3246 	SRP_OPT_IP_DEST		= 1 << 16,
3247 	SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
3248 	SRP_OPT_MAX_IT_IU_SIZE  = 1 << 18,
3249 	SRP_OPT_CH_COUNT	= 1 << 19,
3250 };
3251 
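/*
 * Either of the following two parameter combinations is sufficient to
 * create a target port: the first one is used for logins through the IB CM
 * (dgid/pkey/service_id), the second one for logins through the RDMA/CM
 * (dest=<IP address>:<port>).
 */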
3252 static unsigned int srp_opt_mandatory[] = {
3253 	SRP_OPT_ID_EXT		|
3254 	SRP_OPT_IOC_GUID	|
3255 	SRP_OPT_DGID		|
3256 	SRP_OPT_PKEY		|
3257 	SRP_OPT_SERVICE_ID,
3258 	SRP_OPT_ID_EXT		|
3259 	SRP_OPT_IOC_GUID	|
3260 	SRP_OPT_IP_DEST,
3261 };
3262 
3263 static const match_table_t srp_opt_tokens = {
3264 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
3265 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
3266 	{ SRP_OPT_DGID,			"dgid=%s" 		},
3267 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
3268 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
3269 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
3270 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
3271 	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
3272 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
3273 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
3274 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
3275 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
3276 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
3277 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
3278 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
3279 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
3280 	{ SRP_OPT_IP_SRC,		"src=%s"		},
3281 	{ SRP_OPT_IP_DEST,		"dest=%s"		},
3282 	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
3283 	{ SRP_OPT_CH_COUNT,		"ch_count=%u"		},
3284 	{ SRP_OPT_ERR,			NULL 			}
3285 };
3286 
3287 /**
3288  * srp_parse_in - parse an IP address and port number combination
3289  * @net:	   [in]  Network namespace.
3290  * @sa:		   [out] Address family, IP address and port number.
3291  * @addr_port_str: [in]  IP address and port number.
3292  * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
3293  *
3294  * Parse the following address formats:
3295  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3296  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3297  */
3298 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3299 			const char *addr_port_str, bool *has_port)
3300 {
3301 	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3302 	char *port_str;
3303 	int ret;
3304 
3305 	if (!addr)
3306 		return -ENOMEM;
3307 	port_str = strrchr(addr, ':');
3308 	if (port_str && strchr(port_str, ']'))
3309 		port_str = NULL;
3310 	if (port_str)
3311 		*port_str++ = '\0';
3312 	if (has_port)
3313 		*has_port = port_str != NULL;
3314 	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3315 	if (ret && addr[0]) {
3316 		addr_end = addr + strlen(addr) - 1;
3317 		if (addr[0] == '[' && *addr_end == ']') {
3318 			*addr_end = '\0';
3319 			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3320 						   port_str, sa);
3321 		}
3322 	}
3323 	kfree(addr);
3324 	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3325 	return ret;
3326 }
3327 
3328 static int srp_parse_options(struct net *net, const char *buf,
3329 			     struct srp_target_port *target)
3330 {
3331 	char *options, *sep_opt;
3332 	char *p;
3333 	substring_t args[MAX_OPT_ARGS];
3334 	unsigned long long ull;
3335 	bool has_port;
3336 	int opt_mask = 0;
3337 	int token;
3338 	int ret = -EINVAL;
3339 	int i;
3340 
3341 	options = kstrdup(buf, GFP_KERNEL);
3342 	if (!options)
3343 		return -ENOMEM;
3344 
3345 	sep_opt = options;
3346 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3347 		if (!*p)
3348 			continue;
3349 
3350 		token = match_token(p, srp_opt_tokens, args);
3351 		opt_mask |= token;
3352 
3353 		switch (token) {
3354 		case SRP_OPT_ID_EXT:
3355 			p = match_strdup(args);
3356 			if (!p) {
3357 				ret = -ENOMEM;
3358 				goto out;
3359 			}
3360 			ret = kstrtoull(p, 16, &ull);
3361 			if (ret) {
3362 				pr_warn("invalid id_ext parameter '%s'\n", p);
3363 				kfree(p);
3364 				goto out;
3365 			}
3366 			target->id_ext = cpu_to_be64(ull);
3367 			kfree(p);
3368 			break;
3369 
3370 		case SRP_OPT_IOC_GUID:
3371 			p = match_strdup(args);
3372 			if (!p) {
3373 				ret = -ENOMEM;
3374 				goto out;
3375 			}
3376 			ret = kstrtoull(p, 16, &ull);
3377 			if (ret) {
3378 				pr_warn("invalid ioc_guid parameter '%s'\n", p);
3379 				kfree(p);
3380 				goto out;
3381 			}
3382 			target->ioc_guid = cpu_to_be64(ull);
3383 			kfree(p);
3384 			break;
3385 
3386 		case SRP_OPT_DGID:
3387 			p = match_strdup(args);
3388 			if (!p) {
3389 				ret = -ENOMEM;
3390 				goto out;
3391 			}
3392 			if (strlen(p) != 32) {
3393 				pr_warn("bad dest GID parameter '%s'\n", p);
3394 				kfree(p);
3395 				goto out;
3396 			}
3397 
3398 			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3399 			kfree(p);
3400 			if (ret < 0)
3401 				goto out;
3402 			break;
3403 
3404 		case SRP_OPT_PKEY:
3405 			if (match_hex(args, &token)) {
3406 				pr_warn("bad P_Key parameter '%s'\n", p);
3407 				goto out;
3408 			}
3409 			target->ib_cm.pkey = cpu_to_be16(token);
3410 			break;
3411 
3412 		case SRP_OPT_SERVICE_ID:
3413 			p = match_strdup(args);
3414 			if (!p) {
3415 				ret = -ENOMEM;
3416 				goto out;
3417 			}
3418 			ret = kstrtoull(p, 16, &ull);
3419 			if (ret) {
3420 				pr_warn("bad service_id parameter '%s'\n", p);
3421 				kfree(p);
3422 				goto out;
3423 			}
3424 			target->ib_cm.service_id = cpu_to_be64(ull);
3425 			kfree(p);
3426 			break;
3427 
3428 		case SRP_OPT_IP_SRC:
3429 			p = match_strdup(args);
3430 			if (!p) {
3431 				ret = -ENOMEM;
3432 				goto out;
3433 			}
3434 			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3435 					   NULL);
3436 			if (ret < 0) {
3437 				pr_warn("bad source parameter '%s'\n", p);
3438 				kfree(p);
3439 				goto out;
3440 			}
3441 			target->rdma_cm.src_specified = true;
3442 			kfree(p);
3443 			break;
3444 
3445 		case SRP_OPT_IP_DEST:
3446 			p = match_strdup(args);
3447 			if (!p) {
3448 				ret = -ENOMEM;
3449 				goto out;
3450 			}
3451 			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3452 					   &has_port);
3453 			if (!has_port)
3454 				ret = -EINVAL;
3455 			if (ret < 0) {
3456 				pr_warn("bad dest parameter '%s'\n", p);
3457 				kfree(p);
3458 				goto out;
3459 			}
3460 			target->using_rdma_cm = true;
3461 			kfree(p);
3462 			break;
3463 
3464 		case SRP_OPT_MAX_SECT:
3465 			if (match_int(args, &token)) {
3466 				pr_warn("bad max sect parameter '%s'\n", p);
3467 				goto out;
3468 			}
3469 			target->scsi_host->max_sectors = token;
3470 			break;
3471 
3472 		case SRP_OPT_QUEUE_SIZE:
3473 			if (match_int(args, &token) || token < 1) {
3474 				pr_warn("bad queue_size parameter '%s'\n", p);
3475 				goto out;
3476 			}
3477 			target->scsi_host->can_queue = token;
3478 			target->queue_size = token + SRP_RSP_SQ_SIZE +
3479 					     SRP_TSK_MGMT_SQ_SIZE;
3480 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3481 				target->scsi_host->cmd_per_lun = token;
3482 			break;
3483 
3484 		case SRP_OPT_MAX_CMD_PER_LUN:
3485 			if (match_int(args, &token) || token < 1) {
3486 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3487 					p);
3488 				goto out;
3489 			}
3490 			target->scsi_host->cmd_per_lun = token;
3491 			break;
3492 
3493 		case SRP_OPT_TARGET_CAN_QUEUE:
3494 			if (match_int(args, &token) || token < 1) {
3495 				pr_warn("bad max target_can_queue parameter '%s'\n",
3496 					p);
3497 				goto out;
3498 			}
3499 			target->target_can_queue = token;
3500 			break;
3501 
3502 		case SRP_OPT_IO_CLASS:
3503 			if (match_hex(args, &token)) {
3504 				pr_warn("bad IO class parameter '%s'\n", p);
3505 				goto out;
3506 			}
3507 			if (token != SRP_REV10_IB_IO_CLASS &&
3508 			    token != SRP_REV16A_IB_IO_CLASS) {
3509 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3510 					token, SRP_REV10_IB_IO_CLASS,
3511 					SRP_REV16A_IB_IO_CLASS);
3512 				goto out;
3513 			}
3514 			target->io_class = token;
3515 			break;
3516 
3517 		case SRP_OPT_INITIATOR_EXT:
3518 			p = match_strdup(args);
3519 			if (!p) {
3520 				ret = -ENOMEM;
3521 				goto out;
3522 			}
3523 			ret = kstrtoull(p, 16, &ull);
3524 			if (ret) {
3525 				pr_warn("bad initiator_ext value '%s'\n", p);
3526 				kfree(p);
3527 				goto out;
3528 			}
3529 			target->initiator_ext = cpu_to_be64(ull);
3530 			kfree(p);
3531 			break;
3532 
3533 		case SRP_OPT_CMD_SG_ENTRIES:
3534 			if (match_int(args, &token) || token < 1 || token > 255) {
3535 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3536 					p);
3537 				goto out;
3538 			}
3539 			target->cmd_sg_cnt = token;
3540 			break;
3541 
3542 		case SRP_OPT_ALLOW_EXT_SG:
3543 			if (match_int(args, &token)) {
3544 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3545 				goto out;
3546 			}
3547 			target->allow_ext_sg = !!token;
3548 			break;
3549 
3550 		case SRP_OPT_SG_TABLESIZE:
3551 			if (match_int(args, &token) || token < 1 ||
3552 					token > SG_MAX_SEGMENTS) {
3553 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3554 					p);
3555 				goto out;
3556 			}
3557 			target->sg_tablesize = token;
3558 			break;
3559 
3560 		case SRP_OPT_COMP_VECTOR:
3561 			if (match_int(args, &token) || token < 0) {
3562 				pr_warn("bad comp_vector parameter '%s'\n", p);
3563 				goto out;
3564 			}
3565 			target->comp_vector = token;
3566 			break;
3567 
3568 		case SRP_OPT_TL_RETRY_COUNT:
3569 			if (match_int(args, &token) || token < 2 || token > 7) {
3570 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3571 					p);
3572 				goto out;
3573 			}
3574 			target->tl_retry_count = token;
3575 			break;
3576 
3577 		case SRP_OPT_MAX_IT_IU_SIZE:
3578 			if (match_int(args, &token) || token < 0) {
3579 				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3580 				goto out;
3581 			}
3582 			target->max_it_iu_size = token;
3583 			break;
3584 
3585 		case SRP_OPT_CH_COUNT:
3586 			if (match_int(args, &token) || token < 1) {
3587 				pr_warn("bad channel count '%s'\n", p);
3588 				goto out;
3589 			}
3590 			target->ch_count = token;
3591 			break;
3592 
3593 		default:
3594 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3595 				p);
3596 			goto out;
3597 		}
3598 	}
3599 
3600 	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3601 		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3602 			ret = 0;
3603 			break;
3604 		}
3605 	}
3606 	if (ret)
3607 		pr_warn("target creation request is missing one or more parameters\n");
3608 
3609 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3610 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3611 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3612 			target->scsi_host->cmd_per_lun,
3613 			target->scsi_host->can_queue);
3614 
3615 out:
3616 	kfree(options);
3617 	return ret;
3618 }
3619 
3620 static ssize_t srp_create_target(struct device *dev,
3621 				 struct device_attribute *attr,
3622 				 const char *buf, size_t count)
3623 {
3624 	struct srp_host *host =
3625 		container_of(dev, struct srp_host, dev);
3626 	struct Scsi_Host *target_host;
3627 	struct srp_target_port *target;
3628 	struct srp_rdma_ch *ch;
3629 	struct srp_device *srp_dev = host->srp_dev;
3630 	struct ib_device *ibdev = srp_dev->dev;
3631 	int ret, node_idx, node, cpu, i;
3632 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3633 	bool multich = false;
3634 	uint32_t max_iu_len;
3635 
3636 	target_host = scsi_host_alloc(&srp_template,
3637 				      sizeof (struct srp_target_port));
3638 	if (!target_host)
3639 		return -ENOMEM;
3640 
3641 	target_host->transportt  = ib_srp_transport_template;
3642 	target_host->max_channel = 0;
3643 	target_host->max_id      = 1;
3644 	target_host->max_lun     = -1LL;
3645 	target_host->max_cmd_len = sizeof_field(struct srp_cmd, cdb);
3646 	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3647 
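	/*
	 * Unless the HCA supports registering gappy S/G lists
	 * (IB_DEVICE_SG_GAPS_REG), tell the block layer about the MR page
	 * boundary so that every S/G element other than the first starts,
	 * and every element other than the last ends, on an MR page boundary.
	 */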
3648 	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
3649 		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3650 
3651 	target = host_to_target(target_host);
3652 
3653 	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3654 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3655 	target->scsi_host	= target_host;
3656 	target->srp_host	= host;
3657 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
3658 	target->global_rkey	= host->srp_dev->global_rkey;
3659 	target->cmd_sg_cnt	= cmd_sg_entries;
3660 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3661 	target->allow_ext_sg	= allow_ext_sg;
3662 	target->tl_retry_count	= 7;
3663 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3664 
3665 	/*
3666 	 * Prevent the SCSI host from being removed by srp_remove_target()
3667 	 * before this function returns.
3668 	 */
3669 	scsi_host_get(target->scsi_host);
3670 
3671 	ret = mutex_lock_interruptible(&host->add_target_mutex);
3672 	if (ret < 0)
3673 		goto put;
3674 
3675 	ret = srp_parse_options(target->net, buf, target);
3676 	if (ret)
3677 		goto out;
3678 
3679 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3680 
3681 	if (!srp_conn_unique(target->srp_host, target)) {
3682 		if (target->using_rdma_cm) {
3683 			shost_printk(KERN_INFO, target->scsi_host,
3684 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3685 				     be64_to_cpu(target->id_ext),
3686 				     be64_to_cpu(target->ioc_guid),
3687 				     &target->rdma_cm.dst);
3688 		} else {
3689 			shost_printk(KERN_INFO, target->scsi_host,
3690 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3691 				     be64_to_cpu(target->id_ext),
3692 				     be64_to_cpu(target->ioc_guid),
3693 				     be64_to_cpu(target->initiator_ext));
3694 		}
3695 		ret = -EEXIST;
3696 		goto out;
3697 	}
3698 
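	/*
	 * Without fast registration MRs and without an external indirect
	 * descriptor table (allow_ext_sg), every S/G entry must be described
	 * by a descriptor inside the SRP_CMD IU itself, so sg_tablesize may
	 * not exceed cmd_sg_cnt.
	 */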
3699 	if (!srp_dev->has_fr && !target->allow_ext_sg &&
3700 	    target->cmd_sg_cnt < target->sg_tablesize) {
3701 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3702 		target->sg_tablesize = target->cmd_sg_cnt;
3703 	}
3704 
3705 	if (srp_dev->use_fast_reg) {
3706 		bool gaps_reg = (ibdev->attrs.device_cap_flags &
3707 				 IB_DEVICE_SG_GAPS_REG);
3708 
3709 		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3710 				  (ilog2(srp_dev->mr_page_size) - 9);
3711 		if (!gaps_reg) {
3712 			/*
3713 			 * FR can only map one HCA page per entry. If the start
3714 			 * address is not aligned to an HCA page boundary, two
3715 			 * entries will be used for the head and the tail
3716 			 * although these two entries combined contain at most
3717 			 * one HCA page of data. Hence the "+ 1" in the
3718 			 * calculation below.
3719 			 *
3720 			 * The indirect data buffer descriptor is contiguous
3721 			 * so the memory for that buffer will only be
3722 			 * registered if register_always is true. Hence add
3723 			 * one to mr_per_cmd if register_always has been set.
3724 			 */
3725 			mr_per_cmd = register_always +
3726 				(target->scsi_host->max_sectors + 1 +
3727 				 max_sectors_per_mr - 1) / max_sectors_per_mr;
3728 		} else {
3729 			mr_per_cmd = register_always +
3730 				(target->sg_tablesize +
3731 				 srp_dev->max_pages_per_mr - 1) /
3732 				srp_dev->max_pages_per_mr;
3733 		}
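		/*
		 * Worked example for the !gaps_reg branch (illustrative
		 * values): with mr_page_size = 4096 and max_pages_per_mr =
		 * 256, max_sectors_per_mr = 256 << (12 - 9) = 2048 sectors,
		 * i.e. 1 MiB per MR.  With max_sectors = 1024 and
		 * register_always set, mr_per_cmd = 1 + (1024 + 1 + 2047) /
		 * 2048 = 2.
		 */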
3734 		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3735 			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3736 			 max_sectors_per_mr, mr_per_cmd);
3737 	}
3738 
3739 	target_host->sg_tablesize = target->sg_tablesize;
3740 	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3741 	target->mr_per_cmd = mr_per_cmd;
3742 	target->indirect_size = target->sg_tablesize *
3743 				sizeof (struct srp_direct_buf);
3744 	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3745 				       srp_use_imm_data,
3746 				       target->max_it_iu_size);
3747 
3748 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3749 	INIT_WORK(&target->remove_work, srp_remove_work);
3750 	spin_lock_init(&target->lock);
3751 	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3752 	if (ret)
3753 		goto out;
3754 
3755 	ret = -ENOMEM;
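	/*
	 * Default channel count: at least one channel per NUMA node and at
	 * most one per online CPU; unless ch_count (a module-level parameter
	 * defined earlier) overrides it, also no more than min(4 * nodes,
	 * completion vectors).  E.g. 2 nodes, 32 CPUs and 16 completion
	 * vectors yield max(2, min(min(8, 16), 32)) = 8 channels.
	 */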
3756 	if (target->ch_count == 0)
3757 		target->ch_count =
3758 			max_t(unsigned int, num_online_nodes(),
3759 			      min(ch_count ?:
3760 					  min(4 * num_online_nodes(),
3761 					      ibdev->num_comp_vectors),
3762 				  num_online_cpus()));
3763 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3764 			     GFP_KERNEL);
3765 	if (!target->ch)
3766 		goto out;
3767 
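	/*
	 * Spread the channels over the online NUMA nodes: node n gets the
	 * contiguous slice [ch_start, ch_end) of target->ch[] plus a matching
	 * slice of the device's completion vectors, and each channel in that
	 * slice is set up for a CPU that belongs to node n.
	 */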
3768 	node_idx = 0;
3769 	for_each_online_node(node) {
3770 		const int ch_start = (node_idx * target->ch_count /
3771 				      num_online_nodes());
3772 		const int ch_end = ((node_idx + 1) * target->ch_count /
3773 				    num_online_nodes());
3774 		const int cv_start = node_idx * ibdev->num_comp_vectors /
3775 				     num_online_nodes();
3776 		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3777 				   num_online_nodes();
3778 		int cpu_idx = 0;
3779 
3780 		for_each_online_cpu(cpu) {
3781 			if (cpu_to_node(cpu) != node)
3782 				continue;
3783 			if (ch_start + cpu_idx >= ch_end)
3784 				continue;
3785 			ch = &target->ch[ch_start + cpu_idx];
3786 			ch->target = target;
3787 			ch->comp_vector = cv_start == cv_end ? cv_start :
3788 				cv_start + cpu_idx % (cv_end - cv_start);
3789 			spin_lock_init(&ch->lock);
3790 			INIT_LIST_HEAD(&ch->free_tx);
3791 			ret = srp_new_cm_id(ch);
3792 			if (ret)
3793 				goto err_disconnect;
3794 
3795 			ret = srp_create_ch_ib(ch);
3796 			if (ret)
3797 				goto err_disconnect;
3798 
3799 			ret = srp_alloc_req_data(ch);
3800 			if (ret)
3801 				goto err_disconnect;
3802 
3803 			ret = srp_connect_ch(ch, max_iu_len, multich);
3804 			if (ret) {
3805 				char dst[64];
3806 
3807 				if (target->using_rdma_cm)
3808 					snprintf(dst, sizeof(dst), "%pIS",
3809 						 &target->rdma_cm.dst);
3810 				else
3811 					snprintf(dst, sizeof(dst), "%pI6",
3812 						 target->ib_cm.orig_dgid.raw);
3813 				shost_printk(KERN_ERR, target->scsi_host,
3814 					     PFX "Connection %d/%d to %s failed\n",
3815 					     ch_start + cpu_idx,
3816 					     target->ch_count, dst);
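				/*
				 * If even the first channel cannot be
				 * connected, give up.  Otherwise keep the
				 * channels that already connected and shrink
				 * ch_count accordingly.
				 */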
3817 				if (node_idx == 0 && cpu_idx == 0) {
3818 					goto free_ch;
3819 				} else {
3820 					srp_free_ch_ib(target, ch);
3821 					srp_free_req_data(target, ch);
3822 					target->ch_count = ch - target->ch;
3823 					goto connected;
3824 				}
3825 			}
3826 
3827 			multich = true;
3828 			cpu_idx++;
3829 		}
3830 		node_idx++;
3831 	}
3832 
3833 connected:
3834 	target->scsi_host->nr_hw_queues = target->ch_count;
3835 
3836 	ret = srp_add_target(host, target);
3837 	if (ret)
3838 		goto err_disconnect;
3839 
3840 	if (target->state != SRP_TARGET_REMOVED) {
3841 		if (target->using_rdma_cm) {
3842 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3843 				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3844 				     be64_to_cpu(target->id_ext),
3845 				     be64_to_cpu(target->ioc_guid),
3846 				     target->sgid.raw, &target->rdma_cm.dst);
3847 		} else {
3848 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3849 				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3850 				     be64_to_cpu(target->id_ext),
3851 				     be64_to_cpu(target->ioc_guid),
3852 				     be16_to_cpu(target->ib_cm.pkey),
3853 				     be64_to_cpu(target->ib_cm.service_id),
3854 				     target->sgid.raw,
3855 				     target->ib_cm.orig_dgid.raw);
3856 		}
3857 	}
3858 
3859 	ret = count;
3860 
3861 out:
3862 	mutex_unlock(&host->add_target_mutex);
3863 
3864 put:
3865 	scsi_host_put(target->scsi_host); /* undo scsi_host_get() above */
3866 	if (ret < 0) {
3867 		/*
3868 		 * If a call to srp_remove_target() has not been scheduled,
3869 		 * drop the network namespace reference that was obtained
3870 		 * earlier in this function.
3871 		 */
3872 		if (target->state != SRP_TARGET_REMOVED)
3873 			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3874 		scsi_host_put(target->scsi_host); /* drop scsi_host_alloc()'s ref */
3875 	}
3876 
3877 	return ret;
3878 
3879 err_disconnect:
3880 	srp_disconnect_target(target);
3881 
3882 free_ch:
3883 	for (i = 0; i < target->ch_count; i++) {
3884 		ch = &target->ch[i];
3885 		srp_free_ch_ib(target, ch);
3886 		srp_free_req_data(target, ch);
3887 	}
3888 
3889 	kfree(target->ch);
3890 	goto out;
3891 }
3892 
3893 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3894 
3895 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3896 			  char *buf)
3897 {
3898 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3899 
3900 	return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3901 }
3902 
3903 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3904 
3905 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3906 			 char *buf)
3907 {
3908 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3909 
3910 	return sysfs_emit(buf, "%d\n", host->port);
3911 }
3912 
3913 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3914 
3915 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3916 {
3917 	struct srp_host *host;
3918 
3919 	host = kzalloc(sizeof *host, GFP_KERNEL);
3920 	if (!host)
3921 		return NULL;
3922 
3923 	INIT_LIST_HEAD(&host->target_list);
3924 	spin_lock_init(&host->target_lock);
3925 	init_completion(&host->released);
3926 	mutex_init(&host->add_target_mutex);
3927 	host->srp_dev = device;
3928 	host->port = port;
3929 
3930 	host->dev.class = &srp_class;
3931 	host->dev.parent = device->dev->dev.parent;
3932 	dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
3933 		     port);
3934 
3935 	if (device_register(&host->dev))
3936 		goto free_host;
3937 	if (device_create_file(&host->dev, &dev_attr_add_target))
3938 		goto err_class;
3939 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3940 		goto err_class;
3941 	if (device_create_file(&host->dev, &dev_attr_port))
3942 		goto err_class;
3943 
3944 	return host;
3945 
3946 err_class:
3947 	device_unregister(&host->dev);
3948 
3949 free_host:
3950 	kfree(host);
3951 
3952 	return NULL;
3953 }
3954 
3955 static void srp_rename_dev(struct ib_device *device, void *client_data)
3956 {
3957 	struct srp_device *srp_dev = client_data;
3958 	struct srp_host *host, *tmp_host;
3959 
3960 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3961 		char name[IB_DEVICE_NAME_MAX + 8];
3962 
3963 		snprintf(name, sizeof(name), "srp-%s-%d",
3964 			 dev_name(&device->dev), host->port);
3965 		device_rename(&host->dev, name);
3966 	}
3967 }
3968 
3969 static int srp_add_one(struct ib_device *device)
3970 {
3971 	struct srp_device *srp_dev;
3972 	struct ib_device_attr *attr = &device->attrs;
3973 	struct srp_host *host;
3974 	int mr_page_shift;
3975 	unsigned int p;
3976 	u64 max_pages_per_mr;
3977 	unsigned int flags = 0;
3978 
3979 	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3980 	if (!srp_dev)
3981 		return -ENOMEM;
3982 
3983 	/*
3984 	 * Use the smallest page size supported by the HCA, down to a
3985 	 * minimum of 4096 bytes. We're unlikely to build large sglists
3986 	 * out of smaller entries.
3987 	 */
3988 	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
3989 	srp_dev->mr_page_size	= 1 << mr_page_shift;
3990 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
3991 	max_pages_per_mr	= attr->max_mr_size;
3992 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
3993 	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3994 		 attr->max_mr_size, srp_dev->mr_page_size,
3995 		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3996 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3997 					  max_pages_per_mr);
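	/*
	 * Illustrative example: a page_size_cap whose lowest set bit is
	 * 1 << 12 yields mr_page_shift = 12, mr_page_size = 4096 and
	 * mr_page_mask = ~0xfffULL; with max_mr_size = 4 GiB this gives
	 * max_pages_per_mr = 2^32 / 4096 = 1048576, which the min_t() above
	 * caps at SRP_MAX_PAGES_PER_MR.
	 */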
3998 
3999 	srp_dev->has_fr = (attr->device_cap_flags &
4000 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
4001 	if (!never_register && !srp_dev->has_fr)
4002 		dev_warn(&device->dev, "FR is not supported\n");
4003 	else if (!never_register &&
4004 		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
4005 		srp_dev->use_fast_reg = srp_dev->has_fr;
4006 
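	/*
	 * If memory registration is disabled (never_register), not enforced
	 * (!register_always) or not available (no FR support), request a PD
	 * with an "unsafe" global rkey so that unregistered buffers can still
	 * be used for RDMA.
	 */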
4007 	if (never_register || !register_always || !srp_dev->has_fr)
4008 		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
4009 
4010 	if (srp_dev->use_fast_reg) {
4011 		srp_dev->max_pages_per_mr =
4012 			min_t(u32, srp_dev->max_pages_per_mr,
4013 			      attr->max_fast_reg_page_list_len);
4014 	}
4015 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
4016 				   srp_dev->max_pages_per_mr;
4017 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
4018 		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
4019 		 attr->max_fast_reg_page_list_len,
4020 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
4021 
4022 	INIT_LIST_HEAD(&srp_dev->dev_list);
4023 
4024 	srp_dev->dev = device;
4025 	srp_dev->pd  = ib_alloc_pd(device, flags);
4026 	if (IS_ERR(srp_dev->pd)) {
4027 		int ret = PTR_ERR(srp_dev->pd);
4028 
4029 		kfree(srp_dev);
4030 		return ret;
4031 	}
4032 
4033 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4034 		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4035 		WARN_ON_ONCE(srp_dev->global_rkey == 0);
4036 	}
4037 
4038 	rdma_for_each_port (device, p) {
4039 		host = srp_add_port(srp_dev, p);
4040 		if (host)
4041 			list_add_tail(&host->list, &srp_dev->dev_list);
4042 	}
4043 
4044 	ib_set_client_data(device, &srp_client, srp_dev);
4045 	return 0;
4046 }
4047 
4048 static void srp_remove_one(struct ib_device *device, void *client_data)
4049 {
4050 	struct srp_device *srp_dev;
4051 	struct srp_host *host, *tmp_host;
4052 	struct srp_target_port *target;
4053 
4054 	srp_dev = client_data;
4055 
4056 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4057 		device_unregister(&host->dev);
4058 		/*
4059 		 * Wait for the sysfs entry to go away, so that no new
4060 		 * target ports can be created.
4061 		 */
4062 		wait_for_completion(&host->released);
4063 
4064 		/*
4065 		 * Remove all target ports.
4066 		 */
4067 		spin_lock(&host->target_lock);
4068 		list_for_each_entry(target, &host->target_list, list)
4069 			srp_queue_remove_work(target);
4070 		spin_unlock(&host->target_lock);
4071 
4072 		/*
4073 		 * Wait for tl_err and target port removal tasks.
4074 		 */
4075 		flush_workqueue(system_long_wq);
4076 		flush_workqueue(srp_remove_wq);
4077 
4078 		kfree(host);
4079 	}
4080 
4081 	ib_dealloc_pd(srp_dev->pd);
4082 
4083 	kfree(srp_dev);
4084 }
4085 
4086 static struct srp_function_template ib_srp_transport_functions = {
4087 	.has_rport_state	 = true,
4088 	.reset_timer_if_blocked	 = true,
4089 	.reconnect_delay	 = &srp_reconnect_delay,
4090 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
4091 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
4092 	.reconnect		 = srp_rport_reconnect,
4093 	.rport_delete		 = srp_rport_delete,
4094 	.terminate_rport_io	 = srp_terminate_io,
4095 };
4096 
4097 static int __init srp_init_module(void)
4098 {
4099 	int ret;
4100 
4101 	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4102 	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4103 	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4104 	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4105 
4106 	if (srp_sg_tablesize) {
4107 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4108 		if (!cmd_sg_entries)
4109 			cmd_sg_entries = srp_sg_tablesize;
4110 	}
4111 
4112 	if (!cmd_sg_entries)
4113 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4114 
4115 	if (cmd_sg_entries > 255) {
4116 		pr_warn("Clamping cmd_sg_entries to 255\n");
4117 		cmd_sg_entries = 255;
4118 	}
4119 
4120 	if (!indirect_sg_entries)
4121 		indirect_sg_entries = cmd_sg_entries;
4122 	else if (indirect_sg_entries < cmd_sg_entries) {
4123 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4124 			cmd_sg_entries);
4125 		indirect_sg_entries = cmd_sg_entries;
4126 	}
4127 
4128 	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4129 		pr_warn("Clamping indirect_sg_entries to %u\n",
4130 			SG_MAX_SEGMENTS);
4131 		indirect_sg_entries = SG_MAX_SEGMENTS;
4132 	}
4133 
4134 	srp_remove_wq = create_workqueue("srp_remove");
4135 	if (!srp_remove_wq) {
4136 		ret = -ENOMEM;
4137 		goto out;
4138 	}
4139 
4140 	ret = -ENOMEM;
4141 	ib_srp_transport_template =
4142 		srp_attach_transport(&ib_srp_transport_functions);
4143 	if (!ib_srp_transport_template)
4144 		goto destroy_wq;
4145 
4146 	ret = class_register(&srp_class);
4147 	if (ret) {
4148 		pr_err("couldn't register class infiniband_srp\n");
4149 		goto release_tr;
4150 	}
4151 
4152 	ib_sa_register_client(&srp_sa_client);
4153 
4154 	ret = ib_register_client(&srp_client);
4155 	if (ret) {
4156 		pr_err("couldn't register IB client\n");
4157 		goto unreg_sa;
4158 	}
4159 
4160 out:
4161 	return ret;
4162 
4163 unreg_sa:
4164 	ib_sa_unregister_client(&srp_sa_client);
4165 	class_unregister(&srp_class);
4166 
4167 release_tr:
4168 	srp_release_transport(ib_srp_transport_template);
4169 
4170 destroy_wq:
4171 	destroy_workqueue(srp_remove_wq);
4172 	goto out;
4173 }
4174 
4175 static void __exit srp_cleanup_module(void)
4176 {
4177 	ib_unregister_client(&srp_client);
4178 	ib_sa_unregister_client(&srp_sa_client);
4179 	class_unregister(&srp_class);
4180 	srp_release_transport(ib_srp_transport_template);
4181 	destroy_workqueue(srp_remove_wq);
4182 }
4183 
4184 module_init(srp_init_module);
4185 module_exit(srp_cleanup_module);
4186