1 /*
2  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <linux/lockdep.h>
44 #include <linux/inet.h>
45 #include <rdma/ib_cache.h>
46 
47 #include <linux/atomic.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_dbg.h>
52 #include <scsi/scsi_tcq.h>
53 #include <scsi/srp.h>
54 #include <scsi/scsi_transport_srp.h>
55 
56 #include "ib_srp.h"
57 
58 #define DRV_NAME	"ib_srp"
59 #define PFX		DRV_NAME ": "
60 
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
64 
65 #if !defined(CONFIG_DYNAMIC_DEBUG)
66 #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
67 #define DYNAMIC_DEBUG_BRANCH(descriptor) false
68 #endif
69 
70 static unsigned int srp_sg_tablesize;
71 static unsigned int cmd_sg_entries;
72 static unsigned int indirect_sg_entries;
73 static bool allow_ext_sg;
74 static bool register_always = true;
75 static bool never_register;
76 static int topspin_workarounds = 1;
77 
78 module_param(srp_sg_tablesize, uint, 0444);
79 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
80 
81 module_param(cmd_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(cmd_sg_entries,
83 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
84 
85 module_param(indirect_sg_entries, uint, 0444);
86 MODULE_PARM_DESC(indirect_sg_entries,
87 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
88 
89 module_param(allow_ext_sg, bool, 0444);
90 MODULE_PARM_DESC(allow_ext_sg,
91 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
92 
93 module_param(topspin_workarounds, int, 0444);
94 MODULE_PARM_DESC(topspin_workarounds,
95 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
96 
97 module_param(register_always, bool, 0444);
98 MODULE_PARM_DESC(register_always,
99 		 "Use memory registration even for contiguous memory regions");
100 
101 module_param(never_register, bool, 0444);
102 MODULE_PARM_DESC(never_register, "Never register memory");
103 
104 static const struct kernel_param_ops srp_tmo_ops;
105 
106 static int srp_reconnect_delay = 10;
107 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
108 		S_IRUGO | S_IWUSR);
109 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
110 
111 static int srp_fast_io_fail_tmo = 15;
112 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
113 		S_IRUGO | S_IWUSR);
114 MODULE_PARM_DESC(fast_io_fail_tmo,
115 		 "Number of seconds between the observation of a transport"
116 		 " layer error and failing all I/O. \"off\" means that this"
117 		 " functionality is disabled.");
118 
119 static int srp_dev_loss_tmo = 600;
120 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
121 		S_IRUGO | S_IWUSR);
122 MODULE_PARM_DESC(dev_loss_tmo,
123 		 "Maximum number of seconds that the SRP transport should"
		 " insulate against transport layer errors. After this time has been"
125 		 " exceeded the SCSI host is removed. Should be"
126 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
127 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
128 		 " this functionality is disabled.");
129 
130 static bool srp_use_imm_data = true;
131 module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
132 MODULE_PARM_DESC(use_imm_data,
133 		 "Whether or not to request permission to use immediate data during SRP login.");
134 
135 static unsigned int srp_max_imm_data = 8 * 1024;
136 module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
137 MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
138 
139 static unsigned ch_count;
140 module_param(ch_count, uint, 0444);
141 MODULE_PARM_DESC(ch_count,
142 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
143 
144 static int srp_add_one(struct ib_device *device);
145 static void srp_remove_one(struct ib_device *device, void *client_data);
146 static void srp_rename_dev(struct ib_device *device, void *client_data);
147 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
148 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
149 		const char *opname);
150 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
151 			     const struct ib_cm_event *event);
152 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
153 			       struct rdma_cm_event *event);
154 
155 static struct scsi_transport_template *ib_srp_transport_template;
156 static struct workqueue_struct *srp_remove_wq;
157 
158 static struct ib_client srp_client = {
159 	.name   = "srp",
160 	.add    = srp_add_one,
161 	.remove = srp_remove_one,
162 	.rename = srp_rename_dev
163 };
164 
165 static struct ib_sa_client srp_sa_client;
166 
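/*
 * Kernel module parameter get/set callbacks for reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo. A negative stored value is reported
 * as "off".
 */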
167 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
168 {
169 	int tmo = *(int *)kp->arg;
170 
171 	if (tmo >= 0)
172 		return sysfs_emit(buffer, "%d\n", tmo);
173 	else
174 		return sysfs_emit(buffer, "off\n");
175 }
176 
177 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
178 {
179 	int tmo, res;
180 
181 	res = srp_parse_tmo(&tmo, val);
182 	if (res)
183 		goto out;
184 
185 	if (kp->arg == &srp_reconnect_delay)
186 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
187 				    srp_dev_loss_tmo);
188 	else if (kp->arg == &srp_fast_io_fail_tmo)
189 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
190 	else
191 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
192 				    tmo);
193 	if (res)
194 		goto out;
195 	*(int *)kp->arg = tmo;
196 
197 out:
198 	return res;
199 }
200 
201 static const struct kernel_param_ops srp_tmo_ops = {
202 	.get = srp_tmo_get,
203 	.set = srp_tmo_set,
204 };
205 
206 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
207 {
208 	return (struct srp_target_port *) host->hostdata;
209 }
210 
211 static const char *srp_target_info(struct Scsi_Host *host)
212 {
213 	return host_to_target(host)->target_name;
214 }
215 
216 static int srp_target_is_topspin(struct srp_target_port *target)
217 {
218 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
219 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
220 
221 	return topspin_workarounds &&
222 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
223 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
224 }
225 
226 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
227 				   gfp_t gfp_mask,
228 				   enum dma_data_direction direction)
229 {
230 	struct srp_iu *iu;
231 
232 	iu = kmalloc(sizeof *iu, gfp_mask);
233 	if (!iu)
234 		goto out;
235 
236 	iu->buf = kzalloc(size, gfp_mask);
237 	if (!iu->buf)
238 		goto out_free_iu;
239 
240 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
241 				    direction);
242 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
243 		goto out_free_buf;
244 
245 	iu->size      = size;
246 	iu->direction = direction;
247 
248 	return iu;
249 
250 out_free_buf:
251 	kfree(iu->buf);
252 out_free_iu:
253 	kfree(iu);
254 out:
255 	return NULL;
256 }
257 
258 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
259 {
260 	if (!iu)
261 		return;
262 
263 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
264 			    iu->direction);
265 	kfree(iu->buf);
266 	kfree(iu);
267 }
268 
269 static void srp_qp_event(struct ib_event *event, void *context)
270 {
271 	pr_debug("QP event %s (%d)\n",
272 		 ib_event_msg(event->event), event->event);
273 }
274 
275 static int srp_init_ib_qp(struct srp_target_port *target,
276 			  struct ib_qp *qp)
277 {
278 	struct ib_qp_attr *attr;
279 	int ret;
280 
281 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
282 	if (!attr)
283 		return -ENOMEM;
284 
285 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
286 				  target->srp_host->port,
287 				  be16_to_cpu(target->ib_cm.pkey),
288 				  &attr->pkey_index);
289 	if (ret)
290 		goto out;
291 
292 	attr->qp_state        = IB_QPS_INIT;
293 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
294 				    IB_ACCESS_REMOTE_WRITE);
295 	attr->port_num        = target->srp_host->port;
296 
297 	ret = ib_modify_qp(qp, attr,
298 			   IB_QP_STATE		|
299 			   IB_QP_PKEY_INDEX	|
300 			   IB_QP_ACCESS_FLAGS	|
301 			   IB_QP_PORT);
302 
303 out:
304 	kfree(attr);
305 	return ret;
306 }
307 
308 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
309 {
310 	struct srp_target_port *target = ch->target;
311 	struct ib_cm_id *new_cm_id;
312 
313 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
314 				    srp_ib_cm_handler, ch);
315 	if (IS_ERR(new_cm_id))
316 		return PTR_ERR(new_cm_id);
317 
318 	if (ch->ib_cm.cm_id)
319 		ib_destroy_cm_id(ch->ib_cm.cm_id);
320 	ch->ib_cm.cm_id = new_cm_id;
321 	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
322 			    target->srp_host->port))
323 		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
324 	else
325 		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
326 	ch->ib_cm.path.sgid = target->sgid;
327 	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
328 	ch->ib_cm.path.pkey = target->ib_cm.pkey;
329 	ch->ib_cm.path.service_id = target->ib_cm.service_id;
330 
331 	return 0;
332 }
333 
334 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
335 {
336 	struct srp_target_port *target = ch->target;
337 	struct rdma_cm_id *new_cm_id;
338 	int ret;
339 
340 	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
341 				   RDMA_PS_TCP, IB_QPT_RC);
342 	if (IS_ERR(new_cm_id)) {
343 		ret = PTR_ERR(new_cm_id);
344 		new_cm_id = NULL;
345 		goto out;
346 	}
347 
348 	init_completion(&ch->done);
349 	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
350 				&target->rdma_cm.src.sa : NULL,
351 				&target->rdma_cm.dst.sa,
352 				SRP_PATH_REC_TIMEOUT_MS);
353 	if (ret) {
354 		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
355 		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
356 		goto out;
357 	}
358 	ret = wait_for_completion_interruptible(&ch->done);
359 	if (ret < 0)
360 		goto out;
361 
362 	ret = ch->status;
363 	if (ret) {
364 		pr_err("Resolving address %pISpsc failed (%d)\n",
365 		       &target->rdma_cm.dst, ret);
366 		goto out;
367 	}
368 
369 	swap(ch->rdma_cm.cm_id, new_cm_id);
370 
371 out:
372 	if (new_cm_id)
373 		rdma_destroy_id(new_cm_id);
374 
375 	return ret;
376 }
377 
378 static int srp_new_cm_id(struct srp_rdma_ch *ch)
379 {
380 	struct srp_target_port *target = ch->target;
381 
382 	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
383 		srp_new_ib_cm_id(ch);
384 }
385 
386 /**
387  * srp_destroy_fr_pool() - free the resources owned by a pool
388  * @pool: Fast registration pool to be destroyed.
389  */
390 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
391 {
392 	int i;
393 	struct srp_fr_desc *d;
394 
395 	if (!pool)
396 		return;
397 
398 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
399 		if (d->mr)
400 			ib_dereg_mr(d->mr);
401 	}
402 	kfree(pool);
403 }
404 
405 /**
406  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
407  * @device:            IB device to allocate fast registration descriptors for.
408  * @pd:                Protection domain associated with the FR descriptors.
409  * @pool_size:         Number of descriptors to allocate.
410  * @max_page_list_len: Maximum fast registration work request page list length.
411  */
412 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
413 					      struct ib_pd *pd, int pool_size,
414 					      int max_page_list_len)
415 {
416 	struct srp_fr_pool *pool;
417 	struct srp_fr_desc *d;
418 	struct ib_mr *mr;
419 	int i, ret = -EINVAL;
420 	enum ib_mr_type mr_type;
421 
422 	if (pool_size <= 0)
423 		goto err;
424 	ret = -ENOMEM;
425 	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
426 	if (!pool)
427 		goto err;
428 	pool->size = pool_size;
429 	pool->max_page_list_len = max_page_list_len;
430 	spin_lock_init(&pool->lock);
431 	INIT_LIST_HEAD(&pool->free_list);
432 
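	/*
	 * Use IB_MR_TYPE_SG_GAPS MRs when the HCA supports them so that S/G
	 * lists containing gaps can be registered with a single MR.
	 */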
433 	if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
434 		mr_type = IB_MR_TYPE_SG_GAPS;
435 	else
436 		mr_type = IB_MR_TYPE_MEM_REG;
437 
438 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
439 		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
440 		if (IS_ERR(mr)) {
441 			ret = PTR_ERR(mr);
442 			if (ret == -ENOMEM)
443 				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
444 					dev_name(&device->dev));
445 			goto destroy_pool;
446 		}
447 		d->mr = mr;
448 		list_add_tail(&d->entry, &pool->free_list);
449 	}
450 
451 out:
452 	return pool;
453 
454 destroy_pool:
455 	srp_destroy_fr_pool(pool);
456 
457 err:
458 	pool = ERR_PTR(ret);
459 	goto out;
460 }
461 
462 /**
463  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
464  * @pool: Pool to obtain descriptor from.
465  */
466 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
467 {
468 	struct srp_fr_desc *d = NULL;
469 	unsigned long flags;
470 
471 	spin_lock_irqsave(&pool->lock, flags);
472 	if (!list_empty(&pool->free_list)) {
473 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
474 		list_del(&d->entry);
475 	}
476 	spin_unlock_irqrestore(&pool->lock, flags);
477 
478 	return d;
479 }
480 
481 /**
482  * srp_fr_pool_put() - put an FR descriptor back in the free list
483  * @pool: Pool the descriptor was allocated from.
484  * @desc: Pointer to an array of fast registration descriptor pointers.
485  * @n:    Number of descriptors to put back.
486  *
487  * Note: The caller must already have queued an invalidation request for
488  * desc->mr->rkey before calling this function.
489  */
490 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
491 			    int n)
492 {
493 	unsigned long flags;
494 	int i;
495 
496 	spin_lock_irqsave(&pool->lock, flags);
497 	for (i = 0; i < n; i++)
498 		list_add(&desc[i]->entry, &pool->free_list);
499 	spin_unlock_irqrestore(&pool->lock, flags);
500 }
501 
502 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
503 {
504 	struct srp_device *dev = target->srp_host->srp_dev;
505 
506 	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
507 				  dev->max_pages_per_mr);
508 }
509 
510 /**
511  * srp_destroy_qp() - destroy an RDMA queue pair
512  * @ch: SRP RDMA channel.
513  *
 * Drain the qp before destroying it. This prevents the receive
 * completion handler from accessing the queue pair while it is
516  * being destroyed.
517  */
518 static void srp_destroy_qp(struct srp_rdma_ch *ch)
519 {
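	/*
	 * Poll the send CQ under ch->lock so that srp_send_done() can move
	 * completed IUs back onto the free_tx list before the QP is drained
	 * and destroyed.
	 */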
520 	spin_lock_irq(&ch->lock);
521 	ib_process_cq_direct(ch->send_cq, -1);
522 	spin_unlock_irq(&ch->lock);
523 
524 	ib_drain_qp(ch->qp);
525 	ib_destroy_qp(ch->qp);
526 }
527 
528 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
529 {
530 	struct srp_target_port *target = ch->target;
531 	struct srp_device *dev = target->srp_host->srp_dev;
532 	const struct ib_device_attr *attr = &dev->dev->attrs;
533 	struct ib_qp_init_attr *init_attr;
534 	struct ib_cq *recv_cq, *send_cq;
535 	struct ib_qp *qp;
536 	struct srp_fr_pool *fr_pool = NULL;
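	/*
	 * When fast registration is used, each SRP_CMD may be accompanied by
	 * up to two work requests per memory region (a registration and an
	 * invalidation), hence the send queue sizing factor below.
	 */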
537 	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
538 	int ret;
539 
540 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
541 	if (!init_attr)
542 		return -ENOMEM;
543 
544 	/* queue_size + 1 for ib_drain_rq() */
545 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
546 				ch->comp_vector, IB_POLL_SOFTIRQ);
547 	if (IS_ERR(recv_cq)) {
548 		ret = PTR_ERR(recv_cq);
549 		goto err;
550 	}
551 
552 	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
553 				ch->comp_vector, IB_POLL_DIRECT);
554 	if (IS_ERR(send_cq)) {
555 		ret = PTR_ERR(send_cq);
556 		goto err_recv_cq;
557 	}
558 
559 	init_attr->event_handler       = srp_qp_event;
560 	init_attr->cap.max_send_wr     = m * target->queue_size;
561 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
562 	init_attr->cap.max_recv_sge    = 1;
563 	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
564 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
565 	init_attr->qp_type             = IB_QPT_RC;
566 	init_attr->send_cq             = send_cq;
567 	init_attr->recv_cq             = recv_cq;
568 
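	/*
	 * sge[0] of each send carries the SRP IU itself, so one send SGE is
	 * reserved and only the remaining SGEs can hold immediate data.
	 */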
569 	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
570 
571 	if (target->using_rdma_cm) {
572 		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
573 		qp = ch->rdma_cm.cm_id->qp;
574 	} else {
575 		qp = ib_create_qp(dev->pd, init_attr);
576 		if (!IS_ERR(qp)) {
577 			ret = srp_init_ib_qp(target, qp);
578 			if (ret)
579 				ib_destroy_qp(qp);
580 		} else {
581 			ret = PTR_ERR(qp);
582 		}
583 	}
584 	if (ret) {
585 		pr_err("QP creation failed for dev %s: %d\n",
586 		       dev_name(&dev->dev->dev), ret);
587 		goto err_send_cq;
588 	}
589 
590 	if (dev->use_fast_reg) {
591 		fr_pool = srp_alloc_fr_pool(target);
592 		if (IS_ERR(fr_pool)) {
593 			ret = PTR_ERR(fr_pool);
594 			shost_printk(KERN_WARNING, target->scsi_host, PFX
595 				     "FR pool allocation failed (%d)\n", ret);
596 			goto err_qp;
597 		}
598 	}
599 
600 	if (ch->qp)
601 		srp_destroy_qp(ch);
602 	if (ch->recv_cq)
603 		ib_free_cq(ch->recv_cq);
604 	if (ch->send_cq)
605 		ib_free_cq(ch->send_cq);
606 
607 	ch->qp = qp;
608 	ch->recv_cq = recv_cq;
609 	ch->send_cq = send_cq;
610 
611 	if (dev->use_fast_reg) {
612 		if (ch->fr_pool)
613 			srp_destroy_fr_pool(ch->fr_pool);
614 		ch->fr_pool = fr_pool;
615 	}
616 
617 	kfree(init_attr);
618 	return 0;
619 
620 err_qp:
621 	if (target->using_rdma_cm)
622 		rdma_destroy_qp(ch->rdma_cm.cm_id);
623 	else
624 		ib_destroy_qp(qp);
625 
626 err_send_cq:
627 	ib_free_cq(send_cq);
628 
629 err_recv_cq:
630 	ib_free_cq(recv_cq);
631 
632 err:
633 	kfree(init_attr);
634 	return ret;
635 }
636 
637 /*
638  * Note: this function may be called without srp_alloc_iu_bufs() having been
639  * invoked. Hence the ch->[rt]x_ring checks.
640  */
641 static void srp_free_ch_ib(struct srp_target_port *target,
642 			   struct srp_rdma_ch *ch)
643 {
644 	struct srp_device *dev = target->srp_host->srp_dev;
645 	int i;
646 
647 	if (!ch->target)
648 		return;
649 
650 	if (target->using_rdma_cm) {
651 		if (ch->rdma_cm.cm_id) {
652 			rdma_destroy_id(ch->rdma_cm.cm_id);
653 			ch->rdma_cm.cm_id = NULL;
654 		}
655 	} else {
656 		if (ch->ib_cm.cm_id) {
657 			ib_destroy_cm_id(ch->ib_cm.cm_id);
658 			ch->ib_cm.cm_id = NULL;
659 		}
660 	}
661 
	/* Return if srp_new_cm_id() succeeded but srp_create_ch_ib() did not. */
663 	if (!ch->qp)
664 		return;
665 
666 	if (dev->use_fast_reg) {
667 		if (ch->fr_pool)
668 			srp_destroy_fr_pool(ch->fr_pool);
669 	}
670 
671 	srp_destroy_qp(ch);
672 	ib_free_cq(ch->send_cq);
673 	ib_free_cq(ch->recv_cq);
674 
675 	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the SCSI error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
680 	 */
681 	ch->target = NULL;
682 
683 	ch->qp = NULL;
684 	ch->send_cq = ch->recv_cq = NULL;
685 
686 	if (ch->rx_ring) {
687 		for (i = 0; i < target->queue_size; ++i)
688 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
689 		kfree(ch->rx_ring);
690 		ch->rx_ring = NULL;
691 	}
692 	if (ch->tx_ring) {
693 		for (i = 0; i < target->queue_size; ++i)
694 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
695 		kfree(ch->tx_ring);
696 		ch->tx_ring = NULL;
697 	}
698 }
699 
700 static void srp_path_rec_completion(int status,
701 				    struct sa_path_rec *pathrec,
702 				    void *ch_ptr)
703 {
704 	struct srp_rdma_ch *ch = ch_ptr;
705 	struct srp_target_port *target = ch->target;
706 
707 	ch->status = status;
708 	if (status)
709 		shost_printk(KERN_ERR, target->scsi_host,
710 			     PFX "Got failed path rec status %d\n", status);
711 	else
712 		ch->ib_cm.path = *pathrec;
713 	complete(&ch->done);
714 }
715 
716 static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
717 {
718 	struct srp_target_port *target = ch->target;
719 	int ret;
720 
721 	ch->ib_cm.path.numb_path = 1;
722 
723 	init_completion(&ch->done);
724 
725 	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
726 					       target->srp_host->srp_dev->dev,
727 					       target->srp_host->port,
728 					       &ch->ib_cm.path,
729 					       IB_SA_PATH_REC_SERVICE_ID |
730 					       IB_SA_PATH_REC_DGID	 |
731 					       IB_SA_PATH_REC_SGID	 |
732 					       IB_SA_PATH_REC_NUMB_PATH	 |
733 					       IB_SA_PATH_REC_PKEY,
734 					       SRP_PATH_REC_TIMEOUT_MS,
735 					       GFP_KERNEL,
736 					       srp_path_rec_completion,
737 					       ch, &ch->ib_cm.path_query);
738 	if (ch->ib_cm.path_query_id < 0)
739 		return ch->ib_cm.path_query_id;
740 
741 	ret = wait_for_completion_interruptible(&ch->done);
742 	if (ret < 0)
743 		return ret;
744 
745 	if (ch->status < 0)
746 		shost_printk(KERN_WARNING, target->scsi_host,
747 			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
748 			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
749 			     be16_to_cpu(target->ib_cm.pkey),
750 			     be64_to_cpu(target->ib_cm.service_id));
751 
752 	return ch->status;
753 }
754 
755 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
756 {
757 	struct srp_target_port *target = ch->target;
758 	int ret;
759 
760 	init_completion(&ch->done);
761 
762 	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
763 	if (ret)
764 		return ret;
765 
766 	wait_for_completion_interruptible(&ch->done);
767 
768 	if (ch->status != 0)
769 		shost_printk(KERN_WARNING, target->scsi_host,
770 			     PFX "Path resolution failed\n");
771 
772 	return ch->status;
773 }
774 
775 static int srp_lookup_path(struct srp_rdma_ch *ch)
776 {
777 	struct srp_target_port *target = ch->target;
778 
779 	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
780 		srp_ib_lookup_path(ch);
781 }
782 
783 static u8 srp_get_subnet_timeout(struct srp_host *host)
784 {
785 	struct ib_port_attr attr;
786 	int ret;
787 	u8 subnet_timeout = 18;
788 
789 	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
790 	if (ret == 0)
791 		subnet_timeout = attr.subnet_timeout;
792 
793 	if (unlikely(subnet_timeout < 15))
794 		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
795 			dev_name(&host->srp_dev->dev->dev), subnet_timeout);
796 
797 	return subnet_timeout;
798 }
799 
800 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
801 			bool multich)
802 {
803 	struct srp_target_port *target = ch->target;
804 	struct {
805 		struct rdma_conn_param	  rdma_param;
806 		struct srp_login_req_rdma rdma_req;
807 		struct ib_cm_req_param	  ib_param;
808 		struct srp_login_req	  ib_req;
809 	} *req = NULL;
810 	char *ipi, *tpi;
811 	int status;
812 
813 	req = kzalloc(sizeof *req, GFP_KERNEL);
814 	if (!req)
815 		return -ENOMEM;
816 
817 	req->ib_param.flow_control = 1;
818 	req->ib_param.retry_count = target->tl_retry_count;
819 
820 	/*
821 	 * Pick some arbitrary defaults here; we could make these
822 	 * module parameters if anyone cared about setting them.
823 	 */
824 	req->ib_param.responder_resources = 4;
825 	req->ib_param.rnr_retry_count = 7;
826 	req->ib_param.max_cm_retries = 15;
827 
828 	req->ib_req.opcode = SRP_LOGIN_REQ;
829 	req->ib_req.tag = 0;
830 	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
831 	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
832 					      SRP_BUF_FORMAT_INDIRECT);
833 	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
834 				 SRP_MULTICHAN_SINGLE);
835 	if (srp_use_imm_data) {
836 		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
837 		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
838 	}
839 
840 	if (target->using_rdma_cm) {
841 		req->rdma_param.flow_control = req->ib_param.flow_control;
842 		req->rdma_param.responder_resources =
843 			req->ib_param.responder_resources;
844 		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
845 		req->rdma_param.retry_count = req->ib_param.retry_count;
846 		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
847 		req->rdma_param.private_data = &req->rdma_req;
848 		req->rdma_param.private_data_len = sizeof(req->rdma_req);
849 
850 		req->rdma_req.opcode = req->ib_req.opcode;
851 		req->rdma_req.tag = req->ib_req.tag;
852 		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
853 		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
854 		req->rdma_req.req_flags	= req->ib_req.req_flags;
855 		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
856 
857 		ipi = req->rdma_req.initiator_port_id;
858 		tpi = req->rdma_req.target_port_id;
859 	} else {
860 		u8 subnet_timeout;
861 
862 		subnet_timeout = srp_get_subnet_timeout(target->srp_host);
863 
864 		req->ib_param.primary_path = &ch->ib_cm.path;
865 		req->ib_param.alternate_path = NULL;
866 		req->ib_param.service_id = target->ib_cm.service_id;
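		/* The starting PSN is a random 24-bit value. */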
867 		get_random_bytes(&req->ib_param.starting_psn, 4);
868 		req->ib_param.starting_psn &= 0xffffff;
869 		req->ib_param.qp_num = ch->qp->qp_num;
870 		req->ib_param.qp_type = ch->qp->qp_type;
871 		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
872 		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
873 		req->ib_param.private_data = &req->ib_req;
874 		req->ib_param.private_data_len = sizeof(req->ib_req);
875 
876 		ipi = req->ib_req.initiator_port_id;
877 		tpi = req->ib_req.target_port_id;
878 	}
879 
880 	/*
881 	 * In the published SRP specification (draft rev. 16a), the
882 	 * port identifier format is 8 bytes of ID extension followed
883 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
884 	 * opposite order, so that the GUID comes first.
885 	 *
886 	 * Targets conforming to these obsolete drafts can be
887 	 * recognized by the I/O Class they report.
888 	 */
889 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
890 		memcpy(ipi,     &target->sgid.global.interface_id, 8);
891 		memcpy(ipi + 8, &target->initiator_ext, 8);
892 		memcpy(tpi,     &target->ioc_guid, 8);
893 		memcpy(tpi + 8, &target->id_ext, 8);
894 	} else {
895 		memcpy(ipi,     &target->initiator_ext, 8);
896 		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
897 		memcpy(tpi,     &target->id_ext, 8);
898 		memcpy(tpi + 8, &target->ioc_guid, 8);
899 	}
900 
901 	/*
902 	 * Topspin/Cisco SRP targets will reject our login unless we
903 	 * zero out the first 8 bytes of our initiator port ID and set
904 	 * the second 8 bytes to the local node GUID.
905 	 */
906 	if (srp_target_is_topspin(target)) {
907 		shost_printk(KERN_DEBUG, target->scsi_host,
908 			     PFX "Topspin/Cisco initiator port ID workaround "
909 			     "activated for target GUID %016llx\n",
910 			     be64_to_cpu(target->ioc_guid));
911 		memset(ipi, 0, 8);
912 		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
913 	}
914 
915 	if (target->using_rdma_cm)
916 		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
917 	else
918 		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
919 
920 	kfree(req);
921 
922 	return status;
923 }
924 
925 static bool srp_queue_remove_work(struct srp_target_port *target)
926 {
927 	bool changed = false;
928 
929 	spin_lock_irq(&target->lock);
930 	if (target->state != SRP_TARGET_REMOVED) {
931 		target->state = SRP_TARGET_REMOVED;
932 		changed = true;
933 	}
934 	spin_unlock_irq(&target->lock);
935 
936 	if (changed)
937 		queue_work(srp_remove_wq, &target->remove_work);
938 
939 	return changed;
940 }
941 
942 static void srp_disconnect_target(struct srp_target_port *target)
943 {
944 	struct srp_rdma_ch *ch;
945 	int i, ret;
946 
947 	/* XXX should send SRP_I_LOGOUT request */
948 
949 	for (i = 0; i < target->ch_count; i++) {
950 		ch = &target->ch[i];
951 		ch->connected = false;
952 		ret = 0;
953 		if (target->using_rdma_cm) {
954 			if (ch->rdma_cm.cm_id)
955 				rdma_disconnect(ch->rdma_cm.cm_id);
956 		} else {
957 			if (ch->ib_cm.cm_id)
958 				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
959 						      NULL, 0);
960 		}
961 		if (ret < 0) {
962 			shost_printk(KERN_DEBUG, target->scsi_host,
963 				     PFX "Sending CM DREQ failed\n");
964 		}
965 	}
966 }
967 
968 static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
969 {
970 	struct srp_target_port *target = host_to_target(shost);
971 	struct srp_device *dev = target->srp_host->srp_dev;
972 	struct ib_device *ibdev = dev->dev;
973 	struct srp_request *req = scsi_cmd_priv(cmd);
974 
975 	kfree(req->fr_list);
976 	if (req->indirect_dma_addr) {
977 		ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
978 				    target->indirect_size,
979 				    DMA_TO_DEVICE);
980 	}
981 	kfree(req->indirect_desc);
982 
983 	return 0;
984 }
985 
986 static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
987 {
988 	struct srp_target_port *target = host_to_target(shost);
989 	struct srp_device *srp_dev = target->srp_host->srp_dev;
990 	struct ib_device *ibdev = srp_dev->dev;
991 	struct srp_request *req = scsi_cmd_priv(cmd);
992 	dma_addr_t dma_addr;
993 	int ret = -ENOMEM;
994 
995 	if (srp_dev->use_fast_reg) {
996 		req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
997 					GFP_KERNEL);
998 		if (!req->fr_list)
999 			goto out;
1000 	}
1001 	req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
1002 	if (!req->indirect_desc)
1003 		goto out;
1004 
1005 	dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1006 				     target->indirect_size,
1007 				     DMA_TO_DEVICE);
1008 	if (ib_dma_mapping_error(ibdev, dma_addr)) {
1009 		srp_exit_cmd_priv(shost, cmd);
1010 		goto out;
1011 	}
1012 
1013 	req->indirect_dma_addr = dma_addr;
1014 	ret = 0;
1015 
1016 out:
1017 	return ret;
1018 }
1019 
1020 /**
1021  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1022  * @shost: SCSI host whose attributes to remove from sysfs.
1023  *
 * Note: Any attributes defined in the host template that did not exist
1025  * before invocation of this function will be ignored.
1026  */
1027 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1028 {
1029 	const struct attribute_group **g;
1030 	struct attribute **attr;
1031 
1032 	for (g = shost->hostt->shost_groups; *g; ++g) {
1033 		for (attr = (*g)->attrs; *attr; ++attr) {
1034 			struct device_attribute *dev_attr =
1035 				container_of(*attr, typeof(*dev_attr), attr);
1036 
1037 			device_remove_file(&shost->shost_dev, dev_attr);
1038 		}
1039 	}
1040 }
1041 
1042 static void srp_remove_target(struct srp_target_port *target)
1043 {
1044 	struct srp_rdma_ch *ch;
1045 	int i;
1046 
1047 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1048 
1049 	srp_del_scsi_host_attr(target->scsi_host);
1050 	srp_rport_get(target->rport);
1051 	srp_remove_host(target->scsi_host);
1052 	scsi_remove_host(target->scsi_host);
1053 	srp_stop_rport_timers(target->rport);
1054 	srp_disconnect_target(target);
1055 	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1056 	for (i = 0; i < target->ch_count; i++) {
1057 		ch = &target->ch[i];
1058 		srp_free_ch_ib(target, ch);
1059 	}
1060 	cancel_work_sync(&target->tl_err_work);
1061 	srp_rport_put(target->rport);
1062 	kfree(target->ch);
1063 	target->ch = NULL;
1064 
1065 	spin_lock(&target->srp_host->target_lock);
1066 	list_del(&target->list);
1067 	spin_unlock(&target->srp_host->target_lock);
1068 
1069 	scsi_host_put(target->scsi_host);
1070 }
1071 
1072 static void srp_remove_work(struct work_struct *work)
1073 {
1074 	struct srp_target_port *target =
1075 		container_of(work, struct srp_target_port, remove_work);
1076 
1077 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1078 
1079 	srp_remove_target(target);
1080 }
1081 
1082 static void srp_rport_delete(struct srp_rport *rport)
1083 {
1084 	struct srp_target_port *target = rport->lld_data;
1085 
1086 	srp_queue_remove_work(target);
1087 }
1088 
1089 /**
1090  * srp_connected_ch() - number of connected channels
1091  * @target: SRP target port.
1092  */
1093 static int srp_connected_ch(struct srp_target_port *target)
1094 {
1095 	int i, c = 0;
1096 
1097 	for (i = 0; i < target->ch_count; i++)
1098 		c += target->ch[i].connected;
1099 
1100 	return c;
1101 }
1102 
1103 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1104 			  bool multich)
1105 {
1106 	struct srp_target_port *target = ch->target;
1107 	int ret;
1108 
1109 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1110 
1111 	ret = srp_lookup_path(ch);
1112 	if (ret)
1113 		goto out;
1114 
1115 	while (1) {
1116 		init_completion(&ch->done);
1117 		ret = srp_send_req(ch, max_iu_len, multich);
1118 		if (ret)
1119 			goto out;
1120 		ret = wait_for_completion_interruptible(&ch->done);
1121 		if (ret < 0)
1122 			goto out;
1123 
1124 		/*
1125 		 * The CM event handling code will set status to
1126 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1127 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1128 		 * redirect REJ back.
1129 		 */
1130 		ret = ch->status;
1131 		switch (ret) {
1132 		case 0:
1133 			ch->connected = true;
1134 			goto out;
1135 
1136 		case SRP_PORT_REDIRECT:
1137 			ret = srp_lookup_path(ch);
1138 			if (ret)
1139 				goto out;
1140 			break;
1141 
1142 		case SRP_DLID_REDIRECT:
1143 			break;
1144 
1145 		case SRP_STALE_CONN:
1146 			shost_printk(KERN_ERR, target->scsi_host, PFX
1147 				     "giving up on stale connection\n");
1148 			ret = -ECONNRESET;
1149 			goto out;
1150 
1151 		default:
1152 			goto out;
1153 		}
1154 	}
1155 
1156 out:
1157 	return ret <= 0 ? ret : -ENODEV;
1158 }
1159 
1160 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1161 {
1162 	srp_handle_qp_err(cq, wc, "INV RKEY");
1163 }
1164 
1165 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1166 		u32 rkey)
1167 {
1168 	struct ib_send_wr wr = {
1169 		.opcode		    = IB_WR_LOCAL_INV,
1170 		.next		    = NULL,
1171 		.num_sge	    = 0,
1172 		.send_flags	    = 0,
1173 		.ex.invalidate_rkey = rkey,
1174 	};
1175 
1176 	wr.wr_cqe = &req->reg_cqe;
1177 	req->reg_cqe.done = srp_inv_rkey_err_done;
1178 	return ib_post_send(ch->qp, &wr, NULL);
1179 }
1180 
1181 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1182 			   struct srp_rdma_ch *ch,
1183 			   struct srp_request *req)
1184 {
1185 	struct srp_target_port *target = ch->target;
1186 	struct srp_device *dev = target->srp_host->srp_dev;
1187 	struct ib_device *ibdev = dev->dev;
1188 	int i, res;
1189 
1190 	if (!scsi_sglist(scmnd) ||
1191 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1192 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1193 		return;
1194 
1195 	if (dev->use_fast_reg) {
1196 		struct srp_fr_desc **pfr;
1197 
1198 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1199 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1200 			if (res < 0) {
1201 				shost_printk(KERN_ERR, target->scsi_host, PFX
1202 				  "Queueing INV WR for rkey %#x failed (%d)\n",
1203 				  (*pfr)->mr->rkey, res);
1204 				queue_work(system_long_wq,
1205 					   &target->tl_err_work);
1206 			}
1207 		}
1208 		if (req->nmdesc)
1209 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
1210 					req->nmdesc);
1211 	}
1212 
1213 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1214 			scmnd->sc_data_direction);
1215 }
1216 
1217 /**
 * srp_claim_req() - Take ownership of the scmnd associated with a request.
1219  * @ch: SRP RDMA channel.
1220  * @req: SRP request.
1221  * @sdev: If not NULL, only take ownership for this SCSI device.
1222  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1223  *         ownership of @req->scmnd if it equals @scmnd.
1224  *
1225  * Return value:
1226  * Either NULL or a pointer to the SCSI command the caller became owner of.
1227  */
1228 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1229 				       struct srp_request *req,
1230 				       struct scsi_device *sdev,
1231 				       struct scsi_cmnd *scmnd)
1232 {
1233 	unsigned long flags;
1234 
1235 	spin_lock_irqsave(&ch->lock, flags);
1236 	if (req->scmnd &&
1237 	    (!sdev || req->scmnd->device == sdev) &&
1238 	    (!scmnd || req->scmnd == scmnd)) {
1239 		scmnd = req->scmnd;
1240 		req->scmnd = NULL;
1241 	} else {
1242 		scmnd = NULL;
1243 	}
1244 	spin_unlock_irqrestore(&ch->lock, flags);
1245 
1246 	return scmnd;
1247 }
1248 
1249 /**
1250  * srp_free_req() - Unmap data and adjust ch->req_lim.
1251  * @ch:     SRP RDMA channel.
1252  * @req:    Request to be freed.
1253  * @scmnd:  SCSI command associated with @req.
1254  * @req_lim_delta: Amount to be added to @target->req_lim.
1255  */
1256 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1257 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1258 {
1259 	unsigned long flags;
1260 
1261 	srp_unmap_data(scmnd, ch, req);
1262 
1263 	spin_lock_irqsave(&ch->lock, flags);
1264 	ch->req_lim += req_lim_delta;
1265 	spin_unlock_irqrestore(&ch->lock, flags);
1266 }
1267 
1268 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1269 			   struct scsi_device *sdev, int result)
1270 {
1271 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1272 
1273 	if (scmnd) {
1274 		srp_free_req(ch, req, scmnd, 0);
1275 		scmnd->result = result;
1276 		scsi_done(scmnd);
1277 	}
1278 }
1279 
1280 struct srp_terminate_context {
1281 	struct srp_target_port *srp_target;
1282 	int scsi_result;
1283 };
1284 
1285 static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
1286 {
1287 	struct srp_terminate_context *context = context_ptr;
1288 	struct srp_target_port *target = context->srp_target;
1289 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
1290 	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1291 	struct srp_request *req = scsi_cmd_priv(scmnd);
1292 
1293 	srp_finish_req(ch, req, NULL, context->scsi_result);
1294 
1295 	return true;
1296 }
1297 
1298 static void srp_terminate_io(struct srp_rport *rport)
1299 {
1300 	struct srp_target_port *target = rport->lld_data;
1301 	struct srp_terminate_context context = { .srp_target = target,
1302 		.scsi_result = DID_TRANSPORT_FAILFAST << 16 };
1303 
1304 	scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
1305 }
1306 
1307 /* Calculate maximum initiator to target information unit length. */
1308 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1309 				  uint32_t max_it_iu_size)
1310 {
1311 	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1312 		sizeof(struct srp_indirect_buf) +
1313 		cmd_sg_cnt * sizeof(struct srp_direct_buf);
1314 
1315 	if (use_imm_data)
1316 		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1317 				 srp_max_imm_data);
1318 
1319 	if (max_it_iu_size)
1320 		max_iu_len = min(max_iu_len, max_it_iu_size);
1321 
1322 	pr_debug("max_iu_len = %d\n", max_iu_len);
1323 
1324 	return max_iu_len;
1325 }
1326 
1327 /*
1328  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1329  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1330  * srp_reset_device() or srp_reset_host() calls will occur while this function
1331  * is in progress. One way to realize that is not to call this function
1332  * directly but to call srp_reconnect_rport() instead since that last function
1333  * serializes calls of this function via rport->mutex and also blocks
1334  * srp_queuecommand() calls before invoking this function.
1335  */
1336 static int srp_rport_reconnect(struct srp_rport *rport)
1337 {
1338 	struct srp_target_port *target = rport->lld_data;
1339 	struct srp_rdma_ch *ch;
1340 	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1341 						srp_use_imm_data,
1342 						target->max_it_iu_size);
1343 	int i, j, ret = 0;
1344 	bool multich = false;
1345 
1346 	srp_disconnect_target(target);
1347 
1348 	if (target->state == SRP_TARGET_SCANNING)
1349 		return -ENODEV;
1350 
1351 	/*
1352 	 * Now get a new local CM ID so that we avoid confusing the target in
1353 	 * case things are really fouled up. Doing so also ensures that all CM
1354 	 * callbacks will have finished before a new QP is allocated.
1355 	 */
1356 	for (i = 0; i < target->ch_count; i++) {
1357 		ch = &target->ch[i];
1358 		ret += srp_new_cm_id(ch);
1359 	}
1360 	{
1361 		struct srp_terminate_context context = {
1362 			.srp_target = target, .scsi_result = DID_RESET << 16};
1363 
1364 		scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
1365 				    &context);
1366 	}
1367 	for (i = 0; i < target->ch_count; i++) {
1368 		ch = &target->ch[i];
1369 		/*
1370 		 * Whether or not creating a new CM ID succeeded, create a new
1371 		 * QP. This guarantees that all completion callback function
1372 		 * invocations have finished before request resetting starts.
1373 		 */
1374 		ret += srp_create_ch_ib(ch);
1375 
1376 		INIT_LIST_HEAD(&ch->free_tx);
1377 		for (j = 0; j < target->queue_size; ++j)
1378 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1379 	}
1380 
1381 	target->qp_in_error = false;
1382 
1383 	for (i = 0; i < target->ch_count; i++) {
1384 		ch = &target->ch[i];
1385 		if (ret)
1386 			break;
1387 		ret = srp_connect_ch(ch, max_iu_len, multich);
1388 		multich = true;
1389 	}
1390 
1391 	if (ret == 0)
1392 		shost_printk(KERN_INFO, target->scsi_host,
1393 			     PFX "reconnect succeeded\n");
1394 
1395 	return ret;
1396 }
1397 
1398 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1399 			 unsigned int dma_len, u32 rkey)
1400 {
1401 	struct srp_direct_buf *desc = state->desc;
1402 
1403 	WARN_ON_ONCE(!dma_len);
1404 
1405 	desc->va = cpu_to_be64(dma_addr);
1406 	desc->key = cpu_to_be32(rkey);
1407 	desc->len = cpu_to_be32(dma_len);
1408 
1409 	state->total_len += dma_len;
1410 	state->desc++;
1411 	state->ndesc++;
1412 }
1413 
1414 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1415 {
1416 	srp_handle_qp_err(cq, wc, "FAST REG");
1417 }
1418 
1419 /*
 * Map up to sg_nents elements of state->sg, where *sg_offset_p is the offset
 * in the first element at which to start. If sg_offset_p != NULL then
1422  * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1423  * byte that has not yet been mapped.
1424  */
1425 static int srp_map_finish_fr(struct srp_map_state *state,
1426 			     struct srp_request *req,
1427 			     struct srp_rdma_ch *ch, int sg_nents,
1428 			     unsigned int *sg_offset_p)
1429 {
1430 	struct srp_target_port *target = ch->target;
1431 	struct srp_device *dev = target->srp_host->srp_dev;
1432 	struct ib_reg_wr wr;
1433 	struct srp_fr_desc *desc;
1434 	u32 rkey;
1435 	int n, err;
1436 
1437 	if (state->fr.next >= state->fr.end) {
1438 		shost_printk(KERN_ERR, ch->target->scsi_host,
1439 			     PFX "Out of MRs (mr_per_cmd = %d)\n",
1440 			     ch->target->mr_per_cmd);
1441 		return -ENOMEM;
1442 	}
1443 
1444 	WARN_ON_ONCE(!dev->use_fast_reg);
1445 
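	/*
	 * If the S/G list has a single element and a global rkey is
	 * available, reference the memory directly instead of consuming a
	 * fast registration descriptor.
	 */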
1446 	if (sg_nents == 1 && target->global_rkey) {
1447 		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1448 
1449 		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1450 			     sg_dma_len(state->sg) - sg_offset,
1451 			     target->global_rkey);
1452 		if (sg_offset_p)
1453 			*sg_offset_p = 0;
1454 		return 1;
1455 	}
1456 
1457 	desc = srp_fr_pool_get(ch->fr_pool);
1458 	if (!desc)
1459 		return -ENOMEM;
1460 
1461 	rkey = ib_inc_rkey(desc->mr->rkey);
1462 	ib_update_fast_reg_key(desc->mr, rkey);
1463 
1464 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1465 			 dev->mr_page_size);
1466 	if (unlikely(n < 0)) {
1467 		srp_fr_pool_put(ch->fr_pool, &desc, 1);
1468 		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1469 			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1470 			 sg_offset_p ? *sg_offset_p : -1, n);
1471 		return n;
1472 	}
1473 
1474 	WARN_ON_ONCE(desc->mr->length == 0);
1475 
1476 	req->reg_cqe.done = srp_reg_mr_err_done;
1477 
1478 	wr.wr.next = NULL;
1479 	wr.wr.opcode = IB_WR_REG_MR;
1480 	wr.wr.wr_cqe = &req->reg_cqe;
1481 	wr.wr.num_sge = 0;
1482 	wr.wr.send_flags = 0;
1483 	wr.mr = desc->mr;
1484 	wr.key = desc->mr->rkey;
1485 	wr.access = (IB_ACCESS_LOCAL_WRITE |
1486 		     IB_ACCESS_REMOTE_READ |
1487 		     IB_ACCESS_REMOTE_WRITE);
1488 
1489 	*state->fr.next++ = desc;
1490 	state->nmdesc++;
1491 
1492 	srp_map_desc(state, desc->mr->iova,
1493 		     desc->mr->length, desc->mr->rkey);
1494 
1495 	err = ib_post_send(ch->qp, &wr.wr, NULL);
1496 	if (unlikely(err)) {
1497 		WARN_ON_ONCE(err == -ENOMEM);
1498 		return err;
1499 	}
1500 
1501 	return n;
1502 }
1503 
1504 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1505 			 struct srp_request *req, struct scatterlist *scat,
1506 			 int count)
1507 {
1508 	unsigned int sg_offset = 0;
1509 
1510 	state->fr.next = req->fr_list;
1511 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1512 	state->sg = scat;
1513 
1514 	if (count == 0)
1515 		return 0;
1516 
1517 	while (count) {
1518 		int i, n;
1519 
1520 		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1521 		if (unlikely(n < 0))
1522 			return n;
1523 
1524 		count -= n;
1525 		for (i = 0; i < n; i++)
1526 			state->sg = sg_next(state->sg);
1527 	}
1528 
1529 	return 0;
1530 }
1531 
1532 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1533 			  struct srp_request *req, struct scatterlist *scat,
1534 			  int count)
1535 {
1536 	struct srp_target_port *target = ch->target;
1537 	struct scatterlist *sg;
1538 	int i;
1539 
1540 	for_each_sg(scat, sg, count, i) {
1541 		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1542 			     target->global_rkey);
1543 	}
1544 
1545 	return 0;
1546 }
1547 
1548 /*
1549  * Register the indirect data buffer descriptor with the HCA.
1550  *
1551  * Note: since the indirect data buffer descriptor has been allocated with
1552  * kmalloc() it is guaranteed that this buffer is a physically contiguous
1553  * memory buffer.
1554  */
1555 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1556 		       void **next_mr, void **end_mr, u32 idb_len,
1557 		       __be32 *idb_rkey)
1558 {
1559 	struct srp_target_port *target = ch->target;
1560 	struct srp_device *dev = target->srp_host->srp_dev;
1561 	struct srp_map_state state;
1562 	struct srp_direct_buf idb_desc;
1563 	struct scatterlist idb_sg[1];
1564 	int ret;
1565 
1566 	memset(&state, 0, sizeof(state));
1567 	memset(&idb_desc, 0, sizeof(idb_desc));
1568 	state.gen.next = next_mr;
1569 	state.gen.end = end_mr;
1570 	state.desc = &idb_desc;
1571 	state.base_dma_addr = req->indirect_dma_addr;
1572 	state.dma_len = idb_len;
1573 
1574 	if (dev->use_fast_reg) {
1575 		state.sg = idb_sg;
1576 		sg_init_one(idb_sg, req->indirect_desc, idb_len);
1577 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1578 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1579 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1580 #endif
1581 		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1582 		if (ret < 0)
1583 			return ret;
1584 		WARN_ON_ONCE(ret < 1);
1585 	} else {
1586 		return -EINVAL;
1587 	}
1588 
1589 	*idb_rkey = idb_desc.key;
1590 
1591 	return 0;
1592 }
1593 
1594 static void srp_check_mapping(struct srp_map_state *state,
1595 			      struct srp_rdma_ch *ch, struct srp_request *req,
1596 			      struct scatterlist *scat, int count)
1597 {
1598 	struct srp_device *dev = ch->target->srp_host->srp_dev;
1599 	struct srp_fr_desc **pfr;
1600 	u64 desc_len = 0, mr_len = 0;
1601 	int i;
1602 
1603 	for (i = 0; i < state->ndesc; i++)
1604 		desc_len += be32_to_cpu(req->indirect_desc[i].len);
1605 	if (dev->use_fast_reg)
1606 		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1607 			mr_len += (*pfr)->mr->length;
1608 	if (desc_len != scsi_bufflen(req->scmnd) ||
1609 	    mr_len > scsi_bufflen(req->scmnd))
1610 		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1611 		       scsi_bufflen(req->scmnd), desc_len, mr_len,
1612 		       state->ndesc, state->nmdesc);
1613 }
1614 
1615 /**
1616  * srp_map_data() - map SCSI data buffer onto an SRP request
1617  * @scmnd: SCSI command to map
1618  * @ch: SRP RDMA channel
1619  * @req: SRP request
1620  *
1621  * Returns the length in bytes of the SRP_CMD IU or a negative value if
1622  * mapping failed. The size of any immediate data is not included in the
1623  * return value.
1624  */
1625 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1626 			struct srp_request *req)
1627 {
1628 	struct srp_target_port *target = ch->target;
1629 	struct scatterlist *scat, *sg;
1630 	struct srp_cmd *cmd = req->cmd->buf;
1631 	int i, len, nents, count, ret;
1632 	struct srp_device *dev;
1633 	struct ib_device *ibdev;
1634 	struct srp_map_state state;
1635 	struct srp_indirect_buf *indirect_hdr;
1636 	u64 data_len;
1637 	u32 idb_len, table_len;
1638 	__be32 idb_rkey;
1639 	u8 fmt;
1640 
1641 	req->cmd->num_sge = 1;
1642 
1643 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1644 		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1645 
1646 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1647 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
1648 		shost_printk(KERN_WARNING, target->scsi_host,
1649 			     PFX "Unhandled data direction %d\n",
1650 			     scmnd->sc_data_direction);
1651 		return -EINVAL;
1652 	}
1653 
1654 	nents = scsi_sg_count(scmnd);
1655 	scat  = scsi_sglist(scmnd);
1656 	data_len = scsi_bufflen(scmnd);
1657 
1658 	dev = target->srp_host->srp_dev;
1659 	ibdev = dev->dev;
1660 
1661 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1662 	if (unlikely(count == 0))
1663 		return -EIO;
1664 
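	/*
	 * Use the immediate data format for write commands when it has been
	 * negotiated and the data fits, together with the SRP_CMD header,
	 * within the maximum initiator-to-target IU length.
	 */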
1665 	if (ch->use_imm_data &&
1666 	    count <= ch->max_imm_sge &&
1667 	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1668 	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
1669 		struct srp_imm_buf *buf;
1670 		struct ib_sge *sge = &req->cmd->sge[1];
1671 
1672 		fmt = SRP_DATA_DESC_IMM;
1673 		len = SRP_IMM_DATA_OFFSET;
1674 		req->nmdesc = 0;
1675 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1676 		buf->len = cpu_to_be32(data_len);
1677 		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1678 		for_each_sg(scat, sg, count, i) {
1679 			sge[i].addr   = sg_dma_address(sg);
1680 			sge[i].length = sg_dma_len(sg);
1681 			sge[i].lkey   = target->lkey;
1682 		}
1683 		req->cmd->num_sge += count;
1684 		goto map_complete;
1685 	}
1686 
1687 	fmt = SRP_DATA_DESC_DIRECT;
1688 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1689 		sizeof(struct srp_direct_buf);
1690 
1691 	if (count == 1 && target->global_rkey) {
1692 		/*
1693 		 * The midlayer only generated a single gather/scatter
1694 		 * entry, or DMA mapping coalesced everything to a
1695 		 * single entry.  So a direct descriptor along with
1696 		 * the DMA MR suffices.
1697 		 */
1698 		struct srp_direct_buf *buf;
1699 
1700 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1701 		buf->va  = cpu_to_be64(sg_dma_address(scat));
1702 		buf->key = cpu_to_be32(target->global_rkey);
1703 		buf->len = cpu_to_be32(sg_dma_len(scat));
1704 
1705 		req->nmdesc = 0;
1706 		goto map_complete;
1707 	}
1708 
1709 	/*
1710 	 * We have more than one scatter/gather entry, so build our indirect
1711 	 * descriptor table, trying to merge as many entries as we can.
1712 	 */
1713 	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1714 
1715 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1716 				   target->indirect_size, DMA_TO_DEVICE);
1717 
1718 	memset(&state, 0, sizeof(state));
1719 	state.desc = req->indirect_desc;
1720 	if (dev->use_fast_reg)
1721 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
1722 	else
1723 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
1724 	req->nmdesc = state.nmdesc;
1725 	if (ret < 0)
1726 		goto unmap;
1727 
1728 	{
1729 		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1730 			"Memory mapping consistency check");
1731 		if (DYNAMIC_DEBUG_BRANCH(ddm))
1732 			srp_check_mapping(&state, ch, req, scat, count);
1733 	}
1734 
1735 	/* We've mapped the request, now pull as much of the indirect
1736 	 * descriptor table as we can into the command buffer. If this
1737 	 * target is not using an external indirect table, we are
1738 	 * guaranteed to fit into the command, as the SCSI layer won't
1739 	 * give us more S/G entries than we allow.
1740 	 */
1741 	if (state.ndesc == 1) {
1742 		/*
1743 		 * Memory registration collapsed the sg-list into one entry,
1744 		 * so use a direct descriptor.
1745 		 */
1746 		struct srp_direct_buf *buf;
1747 
1748 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1749 		*buf = req->indirect_desc[0];
1750 		goto map_complete;
1751 	}
1752 
1753 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1754 						!target->allow_ext_sg)) {
1755 		shost_printk(KERN_ERR, target->scsi_host,
1756 			     "Could not fit S/G list into SRP_CMD\n");
1757 		ret = -EIO;
1758 		goto unmap;
1759 	}
1760 
1761 	count = min(state.ndesc, target->cmd_sg_cnt);
1762 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1763 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1764 
1765 	fmt = SRP_DATA_DESC_INDIRECT;
1766 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1767 		sizeof(struct srp_indirect_buf);
1768 	len += count * sizeof (struct srp_direct_buf);
1769 
1770 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1771 	       count * sizeof (struct srp_direct_buf));
1772 
1773 	if (!target->global_rkey) {
1774 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1775 				  idb_len, &idb_rkey);
1776 		if (ret < 0)
1777 			goto unmap;
1778 		req->nmdesc++;
1779 	} else {
1780 		idb_rkey = cpu_to_be32(target->global_rkey);
1781 	}
1782 
1783 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1784 	indirect_hdr->table_desc.key = idb_rkey;
1785 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1786 	indirect_hdr->len = cpu_to_be32(state.total_len);
1787 
1788 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1789 		cmd->data_out_desc_cnt = count;
1790 	else
1791 		cmd->data_in_desc_cnt = count;
1792 
1793 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1794 				      DMA_TO_DEVICE);
1795 
1796 map_complete:
1797 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1798 		cmd->buf_fmt = fmt << 4;
1799 	else
1800 		cmd->buf_fmt = fmt;
1801 
1802 	return len;
1803 
1804 unmap:
1805 	srp_unmap_data(scmnd, ch, req);
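	/*
	 * If a single command already consumed the entire MR pool, lowering
	 * the queue depth cannot help, so turn -ENOMEM into -E2BIG and let
	 * the caller fail the command instead of reporting TASK SET FULL.
	 */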
1806 	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1807 		ret = -E2BIG;
1808 	return ret;
1809 }
1810 
1811 /*
1812  * Return an IU to the free pool and, for non-response IUs, a credit as well
1813  */
1814 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1815 			  enum srp_iu_type iu_type)
1816 {
1817 	unsigned long flags;
1818 
1819 	spin_lock_irqsave(&ch->lock, flags);
1820 	list_add(&iu->list, &ch->free_tx);
1821 	if (iu_type != SRP_IU_RSP)
1822 		++ch->req_lim;
1823 	spin_unlock_irqrestore(&ch->lock, flags);
1824 }
1825 
1826 /*
1827  * Must be called with ch->lock held to protect req_lim and free_tx.
1828  * If IU is not sent, it must be returned using srp_put_tx_iu().
1829  *
1830  * Note:
1831  * An upper limit for the number of allocated information units for each
1832  * request type is:
1833  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1834  *   more than Scsi_Host.can_queue requests.
1835  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1836  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1837  *   one unanswered SRP request to an initiator.
1838  */
1839 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1840 				      enum srp_iu_type iu_type)
1841 {
1842 	struct srp_target_port *target = ch->target;
1843 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1844 	struct srp_iu *iu;
1845 
1846 	lockdep_assert_held(&ch->lock);
1847 
1848 	ib_process_cq_direct(ch->send_cq, -1);
1849 
1850 	if (list_empty(&ch->free_tx))
1851 		return NULL;
1852 
1853 	/* Initiator responses to target requests do not consume credits */
1854 	if (iu_type != SRP_IU_RSP) {
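		/*
		 * Keep SRP_TSK_MGMT_SQ_SIZE credits in reserve so that task
		 * management IUs can still be sent when regular commands have
		 * used up almost all request-limit credits.
		 */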
1855 		if (ch->req_lim <= rsv) {
1856 			++target->zero_req_lim;
1857 			return NULL;
1858 		}
1859 
1860 		--ch->req_lim;
1861 	}
1862 
1863 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1864 	list_del(&iu->list);
1865 	return iu;
1866 }
1867 
1868 /*
1869  * Note: if this function is called from inside ib_drain_sq() then it will
1870  * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1871  * with status IB_WC_SUCCESS then that's a bug.
1872  */
1873 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1874 {
1875 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1876 	struct srp_rdma_ch *ch = cq->cq_context;
1877 
1878 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1879 		srp_handle_qp_err(cq, wc, "SEND");
1880 		return;
1881 	}
1882 
1883 	lockdep_assert_held(&ch->lock);
1884 
1885 	list_add(&iu->list, &ch->free_tx);
1886 }
1887 
1888 /**
1889  * srp_post_send() - send an SRP information unit
1890  * @ch: RDMA channel over which to send the information unit.
1891  * @iu: Information unit to send.
1892  * @len: Length of the information unit excluding immediate data.
1893  */
1894 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1895 {
1896 	struct srp_target_port *target = ch->target;
1897 	struct ib_send_wr wr;
1898 
1899 	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1900 		return -EINVAL;
1901 
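	/*
	 * Only sge[0], which describes the IU buffer itself, is filled in
	 * here; iu->num_sge and any additional gather entries (e.g. for
	 * immediate data) must have been set up before this call.
	 */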
1902 	iu->sge[0].addr   = iu->dma;
1903 	iu->sge[0].length = len;
1904 	iu->sge[0].lkey   = target->lkey;
1905 
1906 	iu->cqe.done = srp_send_done;
1907 
1908 	wr.next       = NULL;
1909 	wr.wr_cqe     = &iu->cqe;
1910 	wr.sg_list    = &iu->sge[0];
1911 	wr.num_sge    = iu->num_sge;
1912 	wr.opcode     = IB_WR_SEND;
1913 	wr.send_flags = IB_SEND_SIGNALED;
1914 
1915 	return ib_post_send(ch->qp, &wr, NULL);
1916 }
1917 
1918 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1919 {
1920 	struct srp_target_port *target = ch->target;
1921 	struct ib_recv_wr wr;
1922 	struct ib_sge list;
1923 
1924 	list.addr   = iu->dma;
1925 	list.length = iu->size;
1926 	list.lkey   = target->lkey;
1927 
1928 	iu->cqe.done = srp_recv_done;
1929 
1930 	wr.next     = NULL;
1931 	wr.wr_cqe   = &iu->cqe;
1932 	wr.sg_list  = &list;
1933 	wr.num_sge  = 1;
1934 
1935 	return ib_post_recv(ch->qp, &wr, NULL);
1936 }
1937 
1938 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1939 {
1940 	struct srp_target_port *target = ch->target;
1941 	struct srp_request *req;
1942 	struct scsi_cmnd *scmnd;
1943 	unsigned long flags;
1944 
1945 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1946 		spin_lock_irqsave(&ch->lock, flags);
1947 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1948 		if (rsp->tag == ch->tsk_mgmt_tag) {
1949 			ch->tsk_mgmt_status = -1;
1950 			if (be32_to_cpu(rsp->resp_data_len) >= 4)
1951 				ch->tsk_mgmt_status = rsp->data[3];
1952 			complete(&ch->tsk_mgmt_done);
1953 		} else {
1954 			shost_printk(KERN_ERR, target->scsi_host,
1955 				     "Received tsk mgmt response too late for tag %#llx\n",
1956 				     rsp->tag);
1957 		}
1958 		spin_unlock_irqrestore(&ch->lock, flags);
1959 	} else {
1960 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1961 		if (scmnd) {
1962 			req = scsi_cmd_priv(scmnd);
1963 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1964 		} else {
1965 			shost_printk(KERN_ERR, target->scsi_host,
1966 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1967 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1968 
1969 			spin_lock_irqsave(&ch->lock, flags);
1970 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1971 			spin_unlock_irqrestore(&ch->lock, flags);
1972 
1973 			return;
1974 		}
1975 		scmnd->result = rsp->status;
1976 
1977 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1978 			memcpy(scmnd->sense_buffer, rsp->data +
1979 			       be32_to_cpu(rsp->resp_data_len),
1980 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1981 				     SCSI_SENSE_BUFFERSIZE));
1982 		}
1983 
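		/*
		 * Report data-in/data-out underflow as a positive residual
		 * count and overflow as a negative one to the SCSI mid-layer.
		 */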
1984 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1985 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1986 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1987 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1988 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1989 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1990 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1991 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1992 
1993 		srp_free_req(ch, req, scmnd,
1994 			     be32_to_cpu(rsp->req_lim_delta));
1995 
1996 		scsi_done(scmnd);
1997 	}
1998 }
1999 
2000 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
2001 			       void *rsp, int len)
2002 {
2003 	struct srp_target_port *target = ch->target;
2004 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2005 	unsigned long flags;
2006 	struct srp_iu *iu;
2007 	int err;
2008 
2009 	spin_lock_irqsave(&ch->lock, flags);
2010 	ch->req_lim += req_delta;
2011 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2012 	spin_unlock_irqrestore(&ch->lock, flags);
2013 
2014 	if (!iu) {
2015 		shost_printk(KERN_ERR, target->scsi_host, PFX
2016 			     "no IU available to send response\n");
2017 		return 1;
2018 	}
2019 
2020 	iu->num_sge = 1;
2021 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2022 	memcpy(iu->buf, rsp, len);
2023 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2024 
2025 	err = srp_post_send(ch, iu, len);
2026 	if (err) {
2027 		shost_printk(KERN_ERR, target->scsi_host, PFX
2028 			     "unable to post response: %d\n", err);
2029 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2030 	}
2031 
2032 	return err;
2033 }
2034 
2035 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2036 				 struct srp_cred_req *req)
2037 {
2038 	struct srp_cred_rsp rsp = {
2039 		.opcode = SRP_CRED_RSP,
2040 		.tag = req->tag,
2041 	};
2042 	s32 delta = be32_to_cpu(req->req_lim_delta);
2043 
2044 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2045 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2046 			     "problems processing SRP_CRED_REQ\n");
2047 }
2048 
2049 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2050 				struct srp_aer_req *req)
2051 {
2052 	struct srp_target_port *target = ch->target;
2053 	struct srp_aer_rsp rsp = {
2054 		.opcode = SRP_AER_RSP,
2055 		.tag = req->tag,
2056 	};
2057 	s32 delta = be32_to_cpu(req->req_lim_delta);
2058 
2059 	shost_printk(KERN_ERR, target->scsi_host, PFX
2060 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2061 
2062 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2063 		shost_printk(KERN_ERR, target->scsi_host, PFX
2064 			     "problems processing SRP_AER_REQ\n");
2065 }
2066 
2067 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2068 {
2069 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2070 	struct srp_rdma_ch *ch = cq->cq_context;
2071 	struct srp_target_port *target = ch->target;
2072 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2073 	int res;
2074 	u8 opcode;
2075 
2076 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
2077 		srp_handle_qp_err(cq, wc, "RECV");
2078 		return;
2079 	}
2080 
2081 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2082 				   DMA_FROM_DEVICE);
2083 
2084 	opcode = *(u8 *) iu->buf;
2085 
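	/* Change "if (0)" into "if (1)" below to hex-dump every received IU. */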
2086 	if (0) {
2087 		shost_printk(KERN_ERR, target->scsi_host,
2088 			     PFX "recv completion, opcode 0x%02x\n", opcode);
2089 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2090 			       iu->buf, wc->byte_len, true);
2091 	}
2092 
2093 	switch (opcode) {
2094 	case SRP_RSP:
2095 		srp_process_rsp(ch, iu->buf);
2096 		break;
2097 
2098 	case SRP_CRED_REQ:
2099 		srp_process_cred_req(ch, iu->buf);
2100 		break;
2101 
2102 	case SRP_AER_REQ:
2103 		srp_process_aer_req(ch, iu->buf);
2104 		break;
2105 
2106 	case SRP_T_LOGOUT:
2107 		/* XXX Handle target logout */
2108 		shost_printk(KERN_WARNING, target->scsi_host,
2109 			     PFX "Got target logout request\n");
2110 		break;
2111 
2112 	default:
2113 		shost_printk(KERN_WARNING, target->scsi_host,
2114 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2115 		break;
2116 	}
2117 
2118 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2119 				      DMA_FROM_DEVICE);
2120 
2121 	res = srp_post_recv(ch, iu);
2122 	if (res != 0)
2123 		shost_printk(KERN_ERR, target->scsi_host,
2124 			     PFX "Recv failed with error code %d\n", res);
2125 }
2126 
2127 /**
2128  * srp_tl_err_work() - handle a transport layer error
2129  * @work: Work structure embedded in an SRP target port.
2130  *
2131  * Note: This function may get invoked before the rport has been created,
2132  * hence the target->rport test.
2133  */
2134 static void srp_tl_err_work(struct work_struct *work)
2135 {
2136 	struct srp_target_port *target;
2137 
2138 	target = container_of(work, struct srp_target_port, tl_err_work);
2139 	if (target->rport)
2140 		srp_start_tl_fail_timers(target->rport);
2141 }
2142 
2143 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2144 		const char *opname)
2145 {
2146 	struct srp_rdma_ch *ch = cq->cq_context;
2147 	struct srp_target_port *target = ch->target;
2148 
2149 	if (ch->connected && !target->qp_in_error) {
2150 		shost_printk(KERN_ERR, target->scsi_host,
2151 			     PFX "failed %s status %s (%d) for CQE %p\n",
2152 			     opname, ib_wc_status_msg(wc->status), wc->status,
2153 			     wc->wr_cqe);
2154 		queue_work(system_long_wq, &target->tl_err_work);
2155 	}
2156 	target->qp_in_error = true;
2157 }
2158 
2159 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2160 {
2161 	struct request *rq = scsi_cmd_to_rq(scmnd);
2162 	struct srp_target_port *target = host_to_target(shost);
2163 	struct srp_rdma_ch *ch;
2164 	struct srp_request *req = scsi_cmd_priv(scmnd);
2165 	struct srp_iu *iu;
2166 	struct srp_cmd *cmd;
2167 	struct ib_device *dev;
2168 	unsigned long flags;
2169 	u32 tag;
2170 	int len, ret;
2171 
2172 	scmnd->result = srp_chkready(target->rport);
2173 	if (unlikely(scmnd->result))
2174 		goto err;
2175 
2176 	WARN_ON_ONCE(rq->tag < 0);
2177 	tag = blk_mq_unique_tag(rq);
2178 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2179 
2180 	spin_lock_irqsave(&ch->lock, flags);
2181 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2182 	spin_unlock_irqrestore(&ch->lock, flags);
2183 
2184 	if (!iu)
2185 		goto err;
2186 
2187 	dev = target->srp_host->srp_dev->dev;
2188 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2189 				   DMA_TO_DEVICE);
2190 
2191 	cmd = iu->buf;
2192 	memset(cmd, 0, sizeof *cmd);
2193 
2194 	cmd->opcode = SRP_CMD;
2195 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
2196 	cmd->tag    = tag;
2197 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
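	/*
	 * CDBs longer than the 16-byte cdb[] field spill over into the
	 * additional CDB area; record its (4-byte aligned) size so that the
	 * data descriptors are placed behind the additional CDB bytes.
	 */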
2198 	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2199 		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2200 					    4);
2201 		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2202 			goto err_iu;
2203 	}
2204 
2205 	req->scmnd    = scmnd;
2206 	req->cmd      = iu;
2207 
2208 	len = srp_map_data(scmnd, ch, req);
2209 	if (len < 0) {
2210 		shost_printk(KERN_ERR, target->scsi_host,
2211 			     PFX "Failed to map data (%d)\n", len);
2212 		/*
2213 		 * If we ran out of memory descriptors (-ENOMEM) because an
2214 		 * application is queuing many requests with more than
2215 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2216 		 * to reduce queue depth temporarily.
2217 		 */
2218 		scmnd->result = len == -ENOMEM ?
2219 			DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
2220 		goto err_iu;
2221 	}
2222 
2223 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2224 				      DMA_TO_DEVICE);
2225 
2226 	if (srp_post_send(ch, iu, len)) {
2227 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2228 		scmnd->result = DID_ERROR << 16;
2229 		goto err_unmap;
2230 	}
2231 
2232 	return 0;
2233 
2234 err_unmap:
2235 	srp_unmap_data(scmnd, ch, req);
2236 
2237 err_iu:
2238 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2239 
2240 	/*
2241 	 * Prevent the loops that iterate over the request ring from
2242 	 * encountering a dangling SCSI command pointer.
2243 	 */
2244 	req->scmnd = NULL;
2245 
2246 err:
2247 	if (scmnd->result) {
2248 		scsi_done(scmnd);
2249 		ret = 0;
2250 	} else {
2251 		ret = SCSI_MLQUEUE_HOST_BUSY;
2252 	}
2253 
2254 	return ret;
2255 }
2256 
2257 /*
2258  * Note: the resources allocated in this function are freed in
2259  * srp_free_ch_ib().
2260  */
2261 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2262 {
2263 	struct srp_target_port *target = ch->target;
2264 	int i;
2265 
2266 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2267 			      GFP_KERNEL);
2268 	if (!ch->rx_ring)
2269 		goto err_no_ring;
2270 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2271 			      GFP_KERNEL);
2272 	if (!ch->tx_ring)
2273 		goto err_no_ring;
2274 
2275 	for (i = 0; i < target->queue_size; ++i) {
2276 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2277 					      ch->max_ti_iu_len,
2278 					      GFP_KERNEL, DMA_FROM_DEVICE);
2279 		if (!ch->rx_ring[i])
2280 			goto err;
2281 	}
2282 
2283 	for (i = 0; i < target->queue_size; ++i) {
2284 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2285 					      ch->max_it_iu_len,
2286 					      GFP_KERNEL, DMA_TO_DEVICE);
2287 		if (!ch->tx_ring[i])
2288 			goto err;
2289 
2290 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2291 	}
2292 
2293 	return 0;
2294 
2295 err:
2296 	for (i = 0; i < target->queue_size; ++i) {
2297 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2298 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2299 	}
2300 
2302 err_no_ring:
2303 	kfree(ch->tx_ring);
2304 	ch->tx_ring = NULL;
2305 	kfree(ch->rx_ring);
2306 	ch->rx_ring = NULL;
2307 
2308 	return -ENOMEM;
2309 }
2310 
2311 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2312 {
2313 	uint64_t T_tr_ns, max_compl_time_ms;
2314 	uint32_t rq_tmo_jiffies;
2315 
2316 	/*
2317 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2318 	 * table 91), both the QP timeout and the retry count have to be set
2319 	 * for RC QP's during the RTR to RTS transition.
2320 	 */
2321 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2322 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2323 
2324 	/*
2325 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2326 	 * it can take before an error completion is generated. See also
2327 	 * C9-140..142 in the IBTA spec for more information about how to
2328 	 * convert the QP Local ACK Timeout value to nanoseconds.
2329 	 */
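	/*
	 * Illustrative example (values are arbitrary): with a Local ACK
	 * Timeout of 18 and retry_cnt = 7, T_tr_ns = 4096 * 2^18 ns ~= 1.07 s,
	 * so the worst-case completion time is 7 * 4 * 1.07 s ~= 30 s and
	 * rq_tmo_jiffies corresponds to about 31 seconds.
	 */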
2330 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2331 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2332 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2333 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2334 
2335 	return rq_tmo_jiffies;
2336 }
2337 
2338 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2339 			       const struct srp_login_rsp *lrsp,
2340 			       struct srp_rdma_ch *ch)
2341 {
2342 	struct srp_target_port *target = ch->target;
2343 	struct ib_qp_attr *qp_attr = NULL;
2344 	int attr_mask = 0;
2345 	int ret = 0;
2346 	int i;
2347 
2348 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2349 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2350 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2351 		ch->use_imm_data  = srp_use_imm_data &&
2352 			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2353 		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2354 						      ch->use_imm_data,
2355 						      target->max_it_iu_size);
2356 		WARN_ON_ONCE(ch->max_it_iu_len >
2357 			     be32_to_cpu(lrsp->max_it_iu_len));
2358 
2359 		if (ch->use_imm_data)
2360 			shost_printk(KERN_DEBUG, target->scsi_host,
2361 				     PFX "using immediate data\n");
2362 
2363 		/*
2364 		 * Reserve credits for task management so we don't
2365 		 * bounce requests back to the SCSI mid-layer.
2366 		 */
2367 		target->scsi_host->can_queue
2368 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2369 			      target->scsi_host->can_queue);
2370 		target->scsi_host->cmd_per_lun
2371 			= min_t(int, target->scsi_host->can_queue,
2372 				target->scsi_host->cmd_per_lun);
2373 	} else {
2374 		shost_printk(KERN_WARNING, target->scsi_host,
2375 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2376 		ret = -ECONNRESET;
2377 		goto error;
2378 	}
2379 
2380 	if (!ch->rx_ring) {
2381 		ret = srp_alloc_iu_bufs(ch);
2382 		if (ret)
2383 			goto error;
2384 	}
2385 
2386 	for (i = 0; i < target->queue_size; i++) {
2387 		struct srp_iu *iu = ch->rx_ring[i];
2388 
2389 		ret = srp_post_recv(ch, iu);
2390 		if (ret)
2391 			goto error;
2392 	}
2393 
2394 	if (!target->using_rdma_cm) {
2395 		ret = -ENOMEM;
2396 		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2397 		if (!qp_attr)
2398 			goto error;
2399 
2400 		qp_attr->qp_state = IB_QPS_RTR;
2401 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2402 		if (ret)
2403 			goto error_free;
2404 
2405 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2406 		if (ret)
2407 			goto error_free;
2408 
2409 		qp_attr->qp_state = IB_QPS_RTS;
2410 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2411 		if (ret)
2412 			goto error_free;
2413 
2414 		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2415 
2416 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2417 		if (ret)
2418 			goto error_free;
2419 
2420 		ret = ib_send_cm_rtu(cm_id, NULL, 0);
2421 	}
2422 
2423 error_free:
2424 	kfree(qp_attr);
2425 
2426 error:
2427 	ch->status = ret;
2428 }
2429 
2430 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2431 				  const struct ib_cm_event *event,
2432 				  struct srp_rdma_ch *ch)
2433 {
2434 	struct srp_target_port *target = ch->target;
2435 	struct Scsi_Host *shost = target->scsi_host;
2436 	struct ib_class_port_info *cpi;
2437 	int opcode;
2438 	u16 dlid;
2439 
2440 	switch (event->param.rej_rcvd.reason) {
2441 	case IB_CM_REJ_PORT_CM_REDIRECT:
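		/*
		 * The target redirected us to another CM port: copy the LID,
		 * P_Key, QPN and GID from the returned ClassPortInfo into the
		 * path so that the connect attempt can be retried.
		 */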
2442 		cpi = event->param.rej_rcvd.ari;
2443 		dlid = be16_to_cpu(cpi->redirect_lid);
2444 		sa_path_set_dlid(&ch->ib_cm.path, dlid);
2445 		ch->ib_cm.path.pkey = cpi->redirect_pkey;
2446 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2447 		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2448 
2449 		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2450 		break;
2451 
2452 	case IB_CM_REJ_PORT_REDIRECT:
2453 		if (srp_target_is_topspin(target)) {
2454 			union ib_gid *dgid = &ch->ib_cm.path.dgid;
2455 
2456 			/*
2457 			 * Topspin/Cisco SRP gateways incorrectly send
2458 			 * reject reason code 25 when they mean 24
2459 			 * (port redirect).
2460 			 */
2461 			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2462 
2463 			shost_printk(KERN_DEBUG, shost,
2464 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2465 				     be64_to_cpu(dgid->global.subnet_prefix),
2466 				     be64_to_cpu(dgid->global.interface_id));
2467 
2468 			ch->status = SRP_PORT_REDIRECT;
2469 		} else {
2470 			shost_printk(KERN_WARNING, shost,
2471 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2472 			ch->status = -ECONNRESET;
2473 		}
2474 		break;
2475 
2476 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2477 		shost_printk(KERN_WARNING, shost,
2478 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2479 		ch->status = -ECONNRESET;
2480 		break;
2481 
2482 	case IB_CM_REJ_CONSUMER_DEFINED:
2483 		opcode = *(u8 *) event->private_data;
2484 		if (opcode == SRP_LOGIN_REJ) {
2485 			struct srp_login_rej *rej = event->private_data;
2486 			u32 reason = be32_to_cpu(rej->reason);
2487 
2488 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2489 				shost_printk(KERN_WARNING, shost,
2490 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2491 			else
2492 				shost_printk(KERN_WARNING, shost, PFX
2493 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2494 					     target->sgid.raw,
2495 					     target->ib_cm.orig_dgid.raw,
2496 					     reason);
2497 		} else
2498 			shost_printk(KERN_WARNING, shost,
2499 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2500 				     opcode);
2501 		ch->status = -ECONNRESET;
2502 		break;
2503 
2504 	case IB_CM_REJ_STALE_CONN:
2505 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2506 		ch->status = SRP_STALE_CONN;
2507 		break;
2508 
2509 	default:
2510 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2511 			     event->param.rej_rcvd.reason);
2512 		ch->status = -ECONNRESET;
2513 	}
2514 }
2515 
2516 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2517 			     const struct ib_cm_event *event)
2518 {
2519 	struct srp_rdma_ch *ch = cm_id->context;
2520 	struct srp_target_port *target = ch->target;
2521 	int comp = 0;
2522 
2523 	switch (event->event) {
2524 	case IB_CM_REQ_ERROR:
2525 		shost_printk(KERN_DEBUG, target->scsi_host,
2526 			     PFX "Sending CM REQ failed\n");
2527 		comp = 1;
2528 		ch->status = -ECONNRESET;
2529 		break;
2530 
2531 	case IB_CM_REP_RECEIVED:
2532 		comp = 1;
2533 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2534 		break;
2535 
2536 	case IB_CM_REJ_RECEIVED:
2537 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2538 		comp = 1;
2539 
2540 		srp_ib_cm_rej_handler(cm_id, event, ch);
2541 		break;
2542 
2543 	case IB_CM_DREQ_RECEIVED:
2544 		shost_printk(KERN_WARNING, target->scsi_host,
2545 			     PFX "DREQ received - connection closed\n");
2546 		ch->connected = false;
2547 		if (ib_send_cm_drep(cm_id, NULL, 0))
2548 			shost_printk(KERN_ERR, target->scsi_host,
2549 				     PFX "Sending CM DREP failed\n");
2550 		queue_work(system_long_wq, &target->tl_err_work);
2551 		break;
2552 
2553 	case IB_CM_TIMEWAIT_EXIT:
2554 		shost_printk(KERN_ERR, target->scsi_host,
2555 			     PFX "connection closed\n");
2556 		comp = 1;
2557 
2558 		ch->status = 0;
2559 		break;
2560 
2561 	case IB_CM_MRA_RECEIVED:
2562 	case IB_CM_DREQ_ERROR:
2563 	case IB_CM_DREP_RECEIVED:
2564 		break;
2565 
2566 	default:
2567 		shost_printk(KERN_WARNING, target->scsi_host,
2568 			     PFX "Unhandled CM event %d\n", event->event);
2569 		break;
2570 	}
2571 
2572 	if (comp)
2573 		complete(&ch->done);
2574 
2575 	return 0;
2576 }
2577 
2578 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2579 				    struct rdma_cm_event *event)
2580 {
2581 	struct srp_target_port *target = ch->target;
2582 	struct Scsi_Host *shost = target->scsi_host;
2583 	int opcode;
2584 
2585 	switch (event->status) {
2586 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2587 		shost_printk(KERN_WARNING, shost,
2588 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2589 		ch->status = -ECONNRESET;
2590 		break;
2591 
2592 	case IB_CM_REJ_CONSUMER_DEFINED:
2593 		opcode = *(u8 *) event->param.conn.private_data;
2594 		if (opcode == SRP_LOGIN_REJ) {
2595 			struct srp_login_rej *rej =
2596 				(struct srp_login_rej *)
2597 				event->param.conn.private_data;
2598 			u32 reason = be32_to_cpu(rej->reason);
2599 
2600 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2601 				shost_printk(KERN_WARNING, shost,
2602 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2603 			else
2604 				shost_printk(KERN_WARNING, shost,
2605 					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2606 		} else {
2607 			shost_printk(KERN_WARNING, shost,
2608 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2609 				     opcode);
2610 		}
2611 		ch->status = -ECONNRESET;
2612 		break;
2613 
2614 	case IB_CM_REJ_STALE_CONN:
2615 		shost_printk(KERN_WARNING, shost,
2616 			     "  REJ reason: stale connection\n");
2617 		ch->status = SRP_STALE_CONN;
2618 		break;
2619 
2620 	default:
2621 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2622 			     event->status);
2623 		ch->status = -ECONNRESET;
2624 		break;
2625 	}
2626 }
2627 
2628 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2629 			       struct rdma_cm_event *event)
2630 {
2631 	struct srp_rdma_ch *ch = cm_id->context;
2632 	struct srp_target_port *target = ch->target;
2633 	int comp = 0;
2634 
2635 	switch (event->event) {
2636 	case RDMA_CM_EVENT_ADDR_RESOLVED:
2637 		ch->status = 0;
2638 		comp = 1;
2639 		break;
2640 
2641 	case RDMA_CM_EVENT_ADDR_ERROR:
2642 		ch->status = -ENXIO;
2643 		comp = 1;
2644 		break;
2645 
2646 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
2647 		ch->status = 0;
2648 		comp = 1;
2649 		break;
2650 
2651 	case RDMA_CM_EVENT_ROUTE_ERROR:
2652 	case RDMA_CM_EVENT_UNREACHABLE:
2653 		ch->status = -EHOSTUNREACH;
2654 		comp = 1;
2655 		break;
2656 
2657 	case RDMA_CM_EVENT_CONNECT_ERROR:
2658 		shost_printk(KERN_DEBUG, target->scsi_host,
2659 			     PFX "Sending CM REQ failed\n");
2660 		comp = 1;
2661 		ch->status = -ECONNRESET;
2662 		break;
2663 
2664 	case RDMA_CM_EVENT_ESTABLISHED:
2665 		comp = 1;
2666 		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2667 		break;
2668 
2669 	case RDMA_CM_EVENT_REJECTED:
2670 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2671 		comp = 1;
2672 
2673 		srp_rdma_cm_rej_handler(ch, event);
2674 		break;
2675 
2676 	case RDMA_CM_EVENT_DISCONNECTED:
2677 		if (ch->connected) {
2678 			shost_printk(KERN_WARNING, target->scsi_host,
2679 				     PFX "received DREQ\n");
2680 			rdma_disconnect(ch->rdma_cm.cm_id);
2681 			comp = 1;
2682 			ch->status = 0;
2683 			queue_work(system_long_wq, &target->tl_err_work);
2684 		}
2685 		break;
2686 
2687 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2688 		shost_printk(KERN_ERR, target->scsi_host,
2689 			     PFX "connection closed\n");
2690 
2691 		comp = 1;
2692 		ch->status = 0;
2693 		break;
2694 
2695 	default:
2696 		shost_printk(KERN_WARNING, target->scsi_host,
2697 			     PFX "Unhandled CM event %d\n", event->event);
2698 		break;
2699 	}
2700 
2701 	if (comp)
2702 		complete(&ch->done);
2703 
2704 	return 0;
2705 }
2706 
2707 /**
2708  * srp_change_queue_depth - change the queue depth of a SCSI device
2709  * @sdev: SCSI device.
2710  * @qdepth: Requested queue depth.
2711  *
2712  * Returns the new queue depth.
2713  */
2714 static int
2715 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2716 {
2717 	if (!sdev->tagged_supported)
2718 		qdepth = 1;
2719 	return scsi_change_queue_depth(sdev, qdepth);
2720 }
2721 
2722 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2723 			     u8 func, u8 *status)
2724 {
2725 	struct srp_target_port *target = ch->target;
2726 	struct srp_rport *rport = target->rport;
2727 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2728 	struct srp_iu *iu;
2729 	struct srp_tsk_mgmt *tsk_mgmt;
2730 	int res;
2731 
2732 	if (!ch->connected || target->qp_in_error)
2733 		return -1;
2734 
2735 	/*
2736 	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2737 	 * invoked while a task management function is being sent.
2738 	 */
2739 	mutex_lock(&rport->mutex);
2740 	spin_lock_irq(&ch->lock);
2741 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2742 	spin_unlock_irq(&ch->lock);
2743 
2744 	if (!iu) {
2745 		mutex_unlock(&rport->mutex);
2746 
2747 		return -1;
2748 	}
2749 
2750 	iu->num_sge = 1;
2751 
2752 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2753 				   DMA_TO_DEVICE);
2754 	tsk_mgmt = iu->buf;
2755 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2756 
2757 	tsk_mgmt->opcode	= SRP_TSK_MGMT;
2758 	int_to_scsilun(lun, &tsk_mgmt->lun);
2759 	tsk_mgmt->tsk_mgmt_func = func;
2760 	tsk_mgmt->task_tag	= req_tag;
2761 
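	/*
	 * Setting the SRP_TAG_TSK_MGMT bit in the tag lets srp_process_rsp()
	 * tell task management responses apart from SCSI command responses.
	 */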
2762 	spin_lock_irq(&ch->lock);
2763 	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2764 	tsk_mgmt->tag = ch->tsk_mgmt_tag;
2765 	spin_unlock_irq(&ch->lock);
2766 
2767 	init_completion(&ch->tsk_mgmt_done);
2768 
2769 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2770 				      DMA_TO_DEVICE);
2771 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2772 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2773 		mutex_unlock(&rport->mutex);
2774 
2775 		return -1;
2776 	}
2777 	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2778 					msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2779 	if (res > 0 && status)
2780 		*status = ch->tsk_mgmt_status;
2781 	mutex_unlock(&rport->mutex);
2782 
2783 	WARN_ON_ONCE(res < 0);
2784 
2785 	return res > 0 ? 0 : -1;
2786 }
2787 
2788 static int srp_abort(struct scsi_cmnd *scmnd)
2789 {
2790 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2791 	struct srp_request *req = scsi_cmd_priv(scmnd);
2792 	u32 tag;
2793 	u16 ch_idx;
2794 	struct srp_rdma_ch *ch;
2795 	int ret;
2796 
2797 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2798 
2799 	if (!req)
2800 		return SUCCESS;
2801 	tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
2802 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2803 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2804 		return SUCCESS;
2805 	ch = &target->ch[ch_idx];
2806 	if (!srp_claim_req(ch, req, NULL, scmnd))
2807 		return SUCCESS;
2808 	shost_printk(KERN_ERR, target->scsi_host,
2809 		     "Sending SRP abort for tag %#x\n", tag);
2810 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2811 			      SRP_TSK_ABORT_TASK, NULL) == 0)
2812 		ret = SUCCESS;
2813 	else if (target->rport->state == SRP_RPORT_LOST)
2814 		ret = FAST_IO_FAIL;
2815 	else
2816 		ret = FAILED;
2817 	if (ret == SUCCESS) {
2818 		srp_free_req(ch, req, scmnd, 0);
2819 		scmnd->result = DID_ABORT << 16;
2820 		scsi_done(scmnd);
2821 	}
2822 
2823 	return ret;
2824 }
2825 
2826 static int srp_reset_device(struct scsi_cmnd *scmnd)
2827 {
2828 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2829 	struct srp_rdma_ch *ch;
2830 	u8 status;
2831 
2832 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2833 
2834 	ch = &target->ch[0];
2835 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2836 			      SRP_TSK_LUN_RESET, &status))
2837 		return FAILED;
2838 	if (status)
2839 		return FAILED;
2840 
2841 	return SUCCESS;
2842 }
2843 
2844 static int srp_reset_host(struct scsi_cmnd *scmnd)
2845 {
2846 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2847 
2848 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2849 
2850 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2851 }
2852 
2853 static int srp_target_alloc(struct scsi_target *starget)
2854 {
2855 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2856 	struct srp_target_port *target = host_to_target(shost);
2857 
2858 	if (target->target_can_queue)
2859 		starget->can_queue = target->target_can_queue;
2860 	return 0;
2861 }
2862 
2863 static int srp_slave_configure(struct scsi_device *sdev)
2864 {
2865 	struct Scsi_Host *shost = sdev->host;
2866 	struct srp_target_port *target = host_to_target(shost);
2867 	struct request_queue *q = sdev->request_queue;
2868 	unsigned long timeout;
2869 
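	/*
	 * Make sure that the block layer timeout is at least as large as the
	 * worst-case time it can take before a transport error completion is
	 * generated (see srp_compute_rq_tmo()).
	 */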
2870 	if (sdev->type == TYPE_DISK) {
2871 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2872 		blk_queue_rq_timeout(q, timeout);
2873 	}
2874 
2875 	return 0;
2876 }
2877 
2878 static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
2879 			   char *buf)
2880 {
2881 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2882 
2883 	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2884 }
2885 
2886 static DEVICE_ATTR_RO(id_ext);
2887 
2888 static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
2889 			     char *buf)
2890 {
2891 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2892 
2893 	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2894 }
2895 
2896 static DEVICE_ATTR_RO(ioc_guid);
2897 
2898 static ssize_t service_id_show(struct device *dev,
2899 			       struct device_attribute *attr, char *buf)
2900 {
2901 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2902 
2903 	if (target->using_rdma_cm)
2904 		return -ENOENT;
2905 	return sysfs_emit(buf, "0x%016llx\n",
2906 			  be64_to_cpu(target->ib_cm.service_id));
2907 }
2908 
2909 static DEVICE_ATTR_RO(service_id);
2910 
2911 static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
2912 			 char *buf)
2913 {
2914 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2915 
2916 	if (target->using_rdma_cm)
2917 		return -ENOENT;
2918 
2919 	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2920 }
2921 
2922 static DEVICE_ATTR_RO(pkey);
2923 
2924 static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
2925 			 char *buf)
2926 {
2927 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2928 
2929 	return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
2930 }
2931 
2932 static DEVICE_ATTR_RO(sgid);
2933 
2934 static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
2935 			 char *buf)
2936 {
2937 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2938 	struct srp_rdma_ch *ch = &target->ch[0];
2939 
2940 	if (target->using_rdma_cm)
2941 		return -ENOENT;
2942 
2943 	return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2944 }
2945 
2946 static DEVICE_ATTR_RO(dgid);
2947 
2948 static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
2949 			      char *buf)
2950 {
2951 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2952 
2953 	if (target->using_rdma_cm)
2954 		return -ENOENT;
2955 
2956 	return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2957 }
2958 
2959 static DEVICE_ATTR_RO(orig_dgid);
2960 
2961 static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
2962 			    char *buf)
2963 {
2964 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2965 	struct srp_rdma_ch *ch;
2966 	int i, req_lim = INT_MAX;
2967 
2968 	for (i = 0; i < target->ch_count; i++) {
2969 		ch = &target->ch[i];
2970 		req_lim = min(req_lim, ch->req_lim);
2971 	}
2972 
2973 	return sysfs_emit(buf, "%d\n", req_lim);
2974 }
2975 
2976 static DEVICE_ATTR_RO(req_lim);
2977 
2978 static ssize_t zero_req_lim_show(struct device *dev,
2979 				 struct device_attribute *attr, char *buf)
2980 {
2981 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2982 
2983 	return sysfs_emit(buf, "%d\n", target->zero_req_lim);
2984 }
2985 
2986 static DEVICE_ATTR_RO(zero_req_lim);
2987 
2988 static ssize_t local_ib_port_show(struct device *dev,
2989 				  struct device_attribute *attr, char *buf)
2990 {
2991 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2992 
2993 	return sysfs_emit(buf, "%d\n", target->srp_host->port);
2994 }
2995 
2996 static DEVICE_ATTR_RO(local_ib_port);
2997 
2998 static ssize_t local_ib_device_show(struct device *dev,
2999 				    struct device_attribute *attr, char *buf)
3000 {
3001 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3002 
3003 	return sysfs_emit(buf, "%s\n",
3004 			  dev_name(&target->srp_host->srp_dev->dev->dev));
3005 }
3006 
3007 static DEVICE_ATTR_RO(local_ib_device);
3008 
3009 static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
3010 			     char *buf)
3011 {
3012 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3013 
3014 	return sysfs_emit(buf, "%d\n", target->ch_count);
3015 }
3016 
3017 static DEVICE_ATTR_RO(ch_count);
3018 
3019 static ssize_t comp_vector_show(struct device *dev,
3020 				struct device_attribute *attr, char *buf)
3021 {
3022 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3023 
3024 	return sysfs_emit(buf, "%d\n", target->comp_vector);
3025 }
3026 
3027 static DEVICE_ATTR_RO(comp_vector);
3028 
3029 static ssize_t tl_retry_count_show(struct device *dev,
3030 				   struct device_attribute *attr, char *buf)
3031 {
3032 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3033 
3034 	return sysfs_emit(buf, "%d\n", target->tl_retry_count);
3035 }
3036 
3037 static DEVICE_ATTR_RO(tl_retry_count);
3038 
3039 static ssize_t cmd_sg_entries_show(struct device *dev,
3040 				   struct device_attribute *attr, char *buf)
3041 {
3042 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3043 
3044 	return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
3045 }
3046 
3047 static DEVICE_ATTR_RO(cmd_sg_entries);
3048 
3049 static ssize_t allow_ext_sg_show(struct device *dev,
3050 				 struct device_attribute *attr, char *buf)
3051 {
3052 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3053 
3054 	return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3055 }
3056 
3057 static DEVICE_ATTR_RO(allow_ext_sg);
3058 
3059 static struct attribute *srp_host_attrs[] = {
3060 	&dev_attr_id_ext.attr,
3061 	&dev_attr_ioc_guid.attr,
3062 	&dev_attr_service_id.attr,
3063 	&dev_attr_pkey.attr,
3064 	&dev_attr_sgid.attr,
3065 	&dev_attr_dgid.attr,
3066 	&dev_attr_orig_dgid.attr,
3067 	&dev_attr_req_lim.attr,
3068 	&dev_attr_zero_req_lim.attr,
3069 	&dev_attr_local_ib_port.attr,
3070 	&dev_attr_local_ib_device.attr,
3071 	&dev_attr_ch_count.attr,
3072 	&dev_attr_comp_vector.attr,
3073 	&dev_attr_tl_retry_count.attr,
3074 	&dev_attr_cmd_sg_entries.attr,
3075 	&dev_attr_allow_ext_sg.attr,
3076 	NULL
3077 };
3078 
3079 ATTRIBUTE_GROUPS(srp_host);
3080 
3081 static struct scsi_host_template srp_template = {
3082 	.module				= THIS_MODULE,
3083 	.name				= "InfiniBand SRP initiator",
3084 	.proc_name			= DRV_NAME,
3085 	.target_alloc			= srp_target_alloc,
3086 	.slave_configure		= srp_slave_configure,
3087 	.info				= srp_target_info,
3088 	.init_cmd_priv			= srp_init_cmd_priv,
3089 	.exit_cmd_priv			= srp_exit_cmd_priv,
3090 	.queuecommand			= srp_queuecommand,
3091 	.change_queue_depth             = srp_change_queue_depth,
3092 	.eh_timed_out			= srp_timed_out,
3093 	.eh_abort_handler		= srp_abort,
3094 	.eh_device_reset_handler	= srp_reset_device,
3095 	.eh_host_reset_handler		= srp_reset_host,
3096 	.skip_settle_delay		= true,
3097 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
3098 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
3099 	.this_id			= -1,
3100 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
3101 	.shost_groups			= srp_host_groups,
3102 	.track_queue_depth		= 1,
3103 	.cmd_size			= sizeof(struct srp_request),
3104 };
3105 
3106 static int srp_sdev_count(struct Scsi_Host *host)
3107 {
3108 	struct scsi_device *sdev;
3109 	int c = 0;
3110 
3111 	shost_for_each_device(sdev, host)
3112 		c++;
3113 
3114 	return c;
3115 }
3116 
3117 /*
3118  * Return values:
3119  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3120  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3121  *    removal has been scheduled.
3122  * 0 and target->state != SRP_TARGET_REMOVED upon success.
3123  */
3124 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3125 {
3126 	struct srp_rport_identifiers ids;
3127 	struct srp_rport *rport;
3128 
3129 	target->state = SRP_TARGET_SCANNING;
3130 	sprintf(target->target_name, "SRP.T10:%016llX",
3131 		be64_to_cpu(target->id_ext));
3132 
3133 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3134 		return -ENODEV;
3135 
3136 	memcpy(ids.port_id, &target->id_ext, 8);
3137 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3138 	ids.roles = SRP_RPORT_ROLE_TARGET;
3139 	rport = srp_rport_add(target->scsi_host, &ids);
3140 	if (IS_ERR(rport)) {
3141 		scsi_remove_host(target->scsi_host);
3142 		return PTR_ERR(rport);
3143 	}
3144 
3145 	rport->lld_data = target;
3146 	target->rport = rport;
3147 
3148 	spin_lock(&host->target_lock);
3149 	list_add_tail(&target->list, &host->target_list);
3150 	spin_unlock(&host->target_lock);
3151 
3152 	scsi_scan_target(&target->scsi_host->shost_gendev,
3153 			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3154 
3155 	if (srp_connected_ch(target) < target->ch_count ||
3156 	    target->qp_in_error) {
3157 		shost_printk(KERN_INFO, target->scsi_host,
3158 			     PFX "SCSI scan failed - removing SCSI host\n");
3159 		srp_queue_remove_work(target);
3160 		goto out;
3161 	}
3162 
3163 	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3164 		 dev_name(&target->scsi_host->shost_gendev),
3165 		 srp_sdev_count(target->scsi_host));
3166 
3167 	spin_lock_irq(&target->lock);
3168 	if (target->state == SRP_TARGET_SCANNING)
3169 		target->state = SRP_TARGET_LIVE;
3170 	spin_unlock_irq(&target->lock);
3171 
3172 out:
3173 	return 0;
3174 }
3175 
3176 static void srp_release_dev(struct device *dev)
3177 {
3178 	struct srp_host *host =
3179 		container_of(dev, struct srp_host, dev);
3180 
3181 	complete(&host->released);
3182 }
3183 
3184 static struct class srp_class = {
3185 	.name    = "infiniband_srp",
3186 	.dev_release = srp_release_dev
3187 };
3188 
3189 /**
3190  * srp_conn_unique() - check whether the connection to a target is unique
3191  * @host:   SRP host.
3192  * @target: SRP target port.
3193  */
3194 static bool srp_conn_unique(struct srp_host *host,
3195 			    struct srp_target_port *target)
3196 {
3197 	struct srp_target_port *t;
3198 	bool ret = false;
3199 
3200 	if (target->state == SRP_TARGET_REMOVED)
3201 		goto out;
3202 
3203 	ret = true;
3204 
3205 	spin_lock(&host->target_lock);
3206 	list_for_each_entry(t, &host->target_list, list) {
3207 		if (t != target &&
3208 		    target->id_ext == t->id_ext &&
3209 		    target->ioc_guid == t->ioc_guid &&
3210 		    target->initiator_ext == t->initiator_ext) {
3211 			ret = false;
3212 			break;
3213 		}
3214 	}
3215 	spin_unlock(&host->target_lock);
3216 
3217 out:
3218 	return ret;
3219 }
3220 
3221 /*
3222  * Target ports are added by writing
3223  *
3224  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3225  *     pkey=<P_Key>,service_id=<service ID>
3226  * or
3227  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3228  *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3229  *
3230  * to the add_target sysfs attribute.
3231  */
3232 enum {
3233 	SRP_OPT_ERR		= 0,
3234 	SRP_OPT_ID_EXT		= 1 << 0,
3235 	SRP_OPT_IOC_GUID	= 1 << 1,
3236 	SRP_OPT_DGID		= 1 << 2,
3237 	SRP_OPT_PKEY		= 1 << 3,
3238 	SRP_OPT_SERVICE_ID	= 1 << 4,
3239 	SRP_OPT_MAX_SECT	= 1 << 5,
3240 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
3241 	SRP_OPT_IO_CLASS	= 1 << 7,
3242 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
3243 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
3244 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
3245 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
3246 	SRP_OPT_COMP_VECTOR	= 1 << 12,
3247 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
3248 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
3249 	SRP_OPT_IP_SRC		= 1 << 15,
3250 	SRP_OPT_IP_DEST		= 1 << 16,
3251 	SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
3252 	SRP_OPT_MAX_IT_IU_SIZE  = 1 << 18,
3253 	SRP_OPT_CH_COUNT	= 1 << 19,
3254 };
3255 
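/*
 * Either the IB/CM parameters (dgid, pkey and service_id) or the RDMA/CM
 * destination address must be specified in addition to id_ext and ioc_guid.
 */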
3256 static unsigned int srp_opt_mandatory[] = {
3257 	SRP_OPT_ID_EXT		|
3258 	SRP_OPT_IOC_GUID	|
3259 	SRP_OPT_DGID		|
3260 	SRP_OPT_PKEY		|
3261 	SRP_OPT_SERVICE_ID,
3262 	SRP_OPT_ID_EXT		|
3263 	SRP_OPT_IOC_GUID	|
3264 	SRP_OPT_IP_DEST,
3265 };
3266 
3267 static const match_table_t srp_opt_tokens = {
3268 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
3269 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
3270 	{ SRP_OPT_DGID,			"dgid=%s" 		},
3271 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
3272 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
3273 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
3274 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
3275 	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
3276 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
3277 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
3278 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
3279 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
3280 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
3281 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
3282 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
3283 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
3284 	{ SRP_OPT_IP_SRC,		"src=%s"		},
3285 	{ SRP_OPT_IP_DEST,		"dest=%s"		},
3286 	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
3287 	{ SRP_OPT_CH_COUNT,		"ch_count=%u"		},
3288 	{ SRP_OPT_ERR,			NULL 			}
3289 };
3290 
3291 /**
3292  * srp_parse_in - parse an IP address and port number combination
3293  * @net:	   [in]  Network namespace.
3294  * @sa:		   [out] Address family, IP address and port number.
3295  * @addr_port_str: [in]  IP address and port number.
3296  * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
3297  *
3298  * Parse the following address formats:
3299  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3300  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3301  */
3302 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3303 			const char *addr_port_str, bool *has_port)
3304 {
3305 	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3306 	char *port_str;
3307 	int ret;
3308 
3309 	if (!addr)
3310 		return -ENOMEM;
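	/*
	 * Look for a port number: a ':' that is followed by a ']' lies inside
	 * a bracketed IPv6 address and hence is not a port separator.
	 */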
3311 	port_str = strrchr(addr, ':');
3312 	if (port_str && strchr(port_str, ']'))
3313 		port_str = NULL;
3314 	if (port_str)
3315 		*port_str++ = '\0';
3316 	if (has_port)
3317 		*has_port = port_str != NULL;
3318 	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3319 	if (ret && addr[0]) {
3320 		addr_end = addr + strlen(addr) - 1;
3321 		if (addr[0] == '[' && *addr_end == ']') {
3322 			*addr_end = '\0';
3323 			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3324 						   port_str, sa);
3325 		}
3326 	}
3327 	kfree(addr);
3328 	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3329 	return ret;
3330 }
3331 
3332 static int srp_parse_options(struct net *net, const char *buf,
3333 			     struct srp_target_port *target)
3334 {
3335 	char *options, *sep_opt;
3336 	char *p;
3337 	substring_t args[MAX_OPT_ARGS];
3338 	unsigned long long ull;
3339 	bool has_port;
3340 	int opt_mask = 0;
3341 	int token;
3342 	int ret = -EINVAL;
3343 	int i;
3344 
3345 	options = kstrdup(buf, GFP_KERNEL);
3346 	if (!options)
3347 		return -ENOMEM;
3348 
3349 	sep_opt = options;
3350 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3351 		if (!*p)
3352 			continue;
3353 
3354 		token = match_token(p, srp_opt_tokens, args);
3355 		opt_mask |= token;
3356 
3357 		switch (token) {
3358 		case SRP_OPT_ID_EXT:
3359 			p = match_strdup(args);
3360 			if (!p) {
3361 				ret = -ENOMEM;
3362 				goto out;
3363 			}
3364 			ret = kstrtoull(p, 16, &ull);
3365 			if (ret) {
3366 				pr_warn("invalid id_ext parameter '%s'\n", p);
3367 				kfree(p);
3368 				goto out;
3369 			}
3370 			target->id_ext = cpu_to_be64(ull);
3371 			kfree(p);
3372 			break;
3373 
3374 		case SRP_OPT_IOC_GUID:
3375 			p = match_strdup(args);
3376 			if (!p) {
3377 				ret = -ENOMEM;
3378 				goto out;
3379 			}
3380 			ret = kstrtoull(p, 16, &ull);
3381 			if (ret) {
3382 				pr_warn("invalid ioc_guid parameter '%s'\n", p);
3383 				kfree(p);
3384 				goto out;
3385 			}
3386 			target->ioc_guid = cpu_to_be64(ull);
3387 			kfree(p);
3388 			break;
3389 
3390 		case SRP_OPT_DGID:
3391 			p = match_strdup(args);
3392 			if (!p) {
3393 				ret = -ENOMEM;
3394 				goto out;
3395 			}
3396 			if (strlen(p) != 32) {
3397 				pr_warn("bad dest GID parameter '%s'\n", p);
3398 				kfree(p);
3399 				goto out;
3400 			}
3401 
3402 			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3403 			kfree(p);
3404 			if (ret < 0)
3405 				goto out;
3406 			break;
3407 
3408 		case SRP_OPT_PKEY:
3409 			if (match_hex(args, &token)) {
3410 				pr_warn("bad P_Key parameter '%s'\n", p);
3411 				goto out;
3412 			}
3413 			target->ib_cm.pkey = cpu_to_be16(token);
3414 			break;
3415 
3416 		case SRP_OPT_SERVICE_ID:
3417 			p = match_strdup(args);
3418 			if (!p) {
3419 				ret = -ENOMEM;
3420 				goto out;
3421 			}
3422 			ret = kstrtoull(p, 16, &ull);
3423 			if (ret) {
3424 				pr_warn("bad service_id parameter '%s'\n", p);
3425 				kfree(p);
3426 				goto out;
3427 			}
3428 			target->ib_cm.service_id = cpu_to_be64(ull);
3429 			kfree(p);
3430 			break;
3431 
3432 		case SRP_OPT_IP_SRC:
3433 			p = match_strdup(args);
3434 			if (!p) {
3435 				ret = -ENOMEM;
3436 				goto out;
3437 			}
3438 			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3439 					   NULL);
3440 			if (ret < 0) {
3441 				pr_warn("bad source parameter '%s'\n", p);
3442 				kfree(p);
3443 				goto out;
3444 			}
3445 			target->rdma_cm.src_specified = true;
3446 			kfree(p);
3447 			break;
3448 
3449 		case SRP_OPT_IP_DEST:
3450 			p = match_strdup(args);
3451 			if (!p) {
3452 				ret = -ENOMEM;
3453 				goto out;
3454 			}
3455 			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3456 					   &has_port);
3457 			if (!has_port)
3458 				ret = -EINVAL;
3459 			if (ret < 0) {
3460 				pr_warn("bad dest parameter '%s'\n", p);
3461 				kfree(p);
3462 				goto out;
3463 			}
3464 			target->using_rdma_cm = true;
3465 			kfree(p);
3466 			break;
3467 
3468 		case SRP_OPT_MAX_SECT:
3469 			if (match_int(args, &token)) {
3470 				pr_warn("bad max sect parameter '%s'\n", p);
3471 				goto out;
3472 			}
3473 			target->scsi_host->max_sectors = token;
3474 			break;
3475 
3476 		case SRP_OPT_QUEUE_SIZE:
3477 			if (match_int(args, &token) || token < 1) {
3478 				pr_warn("bad queue_size parameter '%s'\n", p);
3479 				goto out;
3480 			}
3481 			target->scsi_host->can_queue = token;
3482 			target->queue_size = token + SRP_RSP_SQ_SIZE +
3483 					     SRP_TSK_MGMT_SQ_SIZE;
3484 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3485 				target->scsi_host->cmd_per_lun = token;
3486 			break;
3487 
3488 		case SRP_OPT_MAX_CMD_PER_LUN:
3489 			if (match_int(args, &token) || token < 1) {
3490 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3491 					p);
3492 				goto out;
3493 			}
3494 			target->scsi_host->cmd_per_lun = token;
3495 			break;
3496 
3497 		case SRP_OPT_TARGET_CAN_QUEUE:
3498 			if (match_int(args, &token) || token < 1) {
3499 				pr_warn("bad max target_can_queue parameter '%s'\n",
3500 					p);
3501 				goto out;
3502 			}
3503 			target->target_can_queue = token;
3504 			break;
3505 
3506 		case SRP_OPT_IO_CLASS:
3507 			if (match_hex(args, &token)) {
3508 				pr_warn("bad IO class parameter '%s'\n", p);
3509 				goto out;
3510 			}
3511 			if (token != SRP_REV10_IB_IO_CLASS &&
3512 			    token != SRP_REV16A_IB_IO_CLASS) {
3513 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3514 					token, SRP_REV10_IB_IO_CLASS,
3515 					SRP_REV16A_IB_IO_CLASS);
3516 				goto out;
3517 			}
3518 			target->io_class = token;
3519 			break;
3520 
3521 		case SRP_OPT_INITIATOR_EXT:
3522 			p = match_strdup(args);
3523 			if (!p) {
3524 				ret = -ENOMEM;
3525 				goto out;
3526 			}
3527 			ret = kstrtoull(p, 16, &ull);
3528 			if (ret) {
3529 				pr_warn("bad initiator_ext value '%s'\n", p);
3530 				kfree(p);
3531 				goto out;
3532 			}
3533 			target->initiator_ext = cpu_to_be64(ull);
3534 			kfree(p);
3535 			break;
3536 
3537 		case SRP_OPT_CMD_SG_ENTRIES:
3538 			if (match_int(args, &token) || token < 1 || token > 255) {
3539 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3540 					p);
3541 				goto out;
3542 			}
3543 			target->cmd_sg_cnt = token;
3544 			break;
3545 
3546 		case SRP_OPT_ALLOW_EXT_SG:
3547 			if (match_int(args, &token)) {
3548 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3549 				goto out;
3550 			}
3551 			target->allow_ext_sg = !!token;
3552 			break;
3553 
3554 		case SRP_OPT_SG_TABLESIZE:
3555 			if (match_int(args, &token) || token < 1 ||
3556 					token > SG_MAX_SEGMENTS) {
3557 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3558 					p);
3559 				goto out;
3560 			}
3561 			target->sg_tablesize = token;
3562 			break;
3563 
3564 		case SRP_OPT_COMP_VECTOR:
3565 			if (match_int(args, &token) || token < 0) {
3566 				pr_warn("bad comp_vector parameter '%s'\n", p);
3567 				goto out;
3568 			}
3569 			target->comp_vector = token;
3570 			break;
3571 
3572 		case SRP_OPT_TL_RETRY_COUNT:
3573 			if (match_int(args, &token) || token < 2 || token > 7) {
3574 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3575 					p);
3576 				goto out;
3577 			}
3578 			target->tl_retry_count = token;
3579 			break;
3580 
3581 		case SRP_OPT_MAX_IT_IU_SIZE:
3582 			if (match_int(args, &token) || token < 0) {
3583 				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3584 				goto out;
3585 			}
3586 			target->max_it_iu_size = token;
3587 			break;
3588 
3589 		case SRP_OPT_CH_COUNT:
3590 			if (match_int(args, &token) || token < 1) {
3591 				pr_warn("bad channel count %s\n", p);
3592 				goto out;
3593 			}
3594 			target->ch_count = token;
3595 			break;
3596 
3597 		default:
3598 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3599 				p);
3600 			goto out;
3601 		}
3602 	}
3603 
3604 	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3605 		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3606 			ret = 0;
3607 			break;
3608 		}
3609 	}
3610 	if (ret)
3611 		pr_warn("target creation request is missing one or more parameters\n");
3612 
3613 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3614 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3615 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3616 			target->scsi_host->cmd_per_lun,
3617 			target->scsi_host->can_queue);
3618 
3619 out:
3620 	kfree(options);
3621 	return ret;
3622 }
3623 
3624 static ssize_t add_target_store(struct device *dev,
3625 				struct device_attribute *attr, const char *buf,
3626 				size_t count)
3627 {
3628 	struct srp_host *host =
3629 		container_of(dev, struct srp_host, dev);
3630 	struct Scsi_Host *target_host;
3631 	struct srp_target_port *target;
3632 	struct srp_rdma_ch *ch;
3633 	struct srp_device *srp_dev = host->srp_dev;
3634 	struct ib_device *ibdev = srp_dev->dev;
3635 	int ret, i, ch_idx;
3636 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3637 	bool multich = false;
3638 	uint32_t max_iu_len;
3639 
3640 	target_host = scsi_host_alloc(&srp_template,
3641 				      sizeof (struct srp_target_port));
3642 	if (!target_host)
3643 		return -ENOMEM;
3644 
3645 	target_host->transportt  = ib_srp_transport_template;
3646 	target_host->max_channel = 0;
3647 	target_host->max_id      = 1;
3648 	target_host->max_lun     = -1LL;
3649 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3650 	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3651 
3652 	if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
3653 		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3654 
3655 	target = host_to_target(target_host);
3656 
3657 	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3658 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3659 	target->scsi_host	= target_host;
3660 	target->srp_host	= host;
3661 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
3662 	target->global_rkey	= host->srp_dev->global_rkey;
3663 	target->cmd_sg_cnt	= cmd_sg_entries;
3664 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3665 	target->allow_ext_sg	= allow_ext_sg;
3666 	target->tl_retry_count	= 7;
3667 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3668 
3669 	/*
3670 	 * Prevent the SCSI host from being removed by srp_remove_target()
3671 	 * before this function returns.
3672 	 */
3673 	scsi_host_get(target->scsi_host);
3674 
3675 	ret = mutex_lock_interruptible(&host->add_target_mutex);
3676 	if (ret < 0)
3677 		goto put;
3678 
3679 	ret = srp_parse_options(target->net, buf, target);
3680 	if (ret)
3681 		goto out;
3682 
3683 	if (!srp_conn_unique(target->srp_host, target)) {
3684 		if (target->using_rdma_cm) {
3685 			shost_printk(KERN_INFO, target->scsi_host,
3686 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3687 				     be64_to_cpu(target->id_ext),
3688 				     be64_to_cpu(target->ioc_guid),
3689 				     &target->rdma_cm.dst);
3690 		} else {
3691 			shost_printk(KERN_INFO, target->scsi_host,
3692 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3693 				     be64_to_cpu(target->id_ext),
3694 				     be64_to_cpu(target->ioc_guid),
3695 				     be64_to_cpu(target->initiator_ext));
3696 		}
3697 		ret = -EEXIST;
3698 		goto out;
3699 	}
3700 
3701 	if (!srp_dev->has_fr && !target->allow_ext_sg &&
3702 	    target->cmd_sg_cnt < target->sg_tablesize) {
3703 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3704 		target->sg_tablesize = target->cmd_sg_cnt;
3705 	}
3706 
3707 	if (srp_dev->use_fast_reg) {
3708 		bool gaps_reg = ibdev->attrs.kernel_cap_flags &
3709 				 IBK_SG_GAPS_REG;
3710 
3711 		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3712 				  (ilog2(srp_dev->mr_page_size) - 9);
3713 		if (!gaps_reg) {
3714 			/*
3715 			 * FR can only map one HCA page per entry. If the start
3716 			 * address is not aligned on an HCA page boundary, two
3717 			 * entries are used for the head and the tail even
3718 			 * though these two entries combined contain at most
3719 			 * one HCA page of data. Hence the "+ 1" in the
3720 			 * calculation below.
3721 			 *
3722 			 * The indirect data buffer descriptor is contiguous,
3723 			 * so the memory for that buffer is only registered
3724 			 * if register_always is true. Hence add one to
3725 			 * mr_per_cmd if register_always has been set.
3726 			 */
3727 			mr_per_cmd = register_always +
3728 				(target->scsi_host->max_sectors + 1 +
3729 				 max_sectors_per_mr - 1) / max_sectors_per_mr;
3730 		} else {
3731 			mr_per_cmd = register_always +
3732 				(target->sg_tablesize +
3733 				 srp_dev->max_pages_per_mr - 1) /
3734 				srp_dev->max_pages_per_mr;
3735 		}
3736 		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3737 			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3738 			 max_sectors_per_mr, mr_per_cmd);
3739 	}
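
	/*
	 * Worked example with made-up numbers: if mr_page_size = 4096 and
	 * max_pages_per_mr = 256, then max_sectors_per_mr = 256 << (12 - 9) =
	 * 2048 sectors (1 MiB).  With scsi_host->max_sectors = 1024 and
	 * register_always = true, the !gaps_reg branch above gives
	 * mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
	 */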
3740 
3741 	target_host->sg_tablesize = target->sg_tablesize;
3742 	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3743 	target->mr_per_cmd = mr_per_cmd;
3744 	target->indirect_size = target->sg_tablesize *
3745 				sizeof (struct srp_direct_buf);
3746 	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3747 				       srp_use_imm_data,
3748 				       target->max_it_iu_size);
3749 
3750 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3751 	INIT_WORK(&target->remove_work, srp_remove_work);
3752 	spin_lock_init(&target->lock);
3753 	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3754 	if (ret)
3755 		goto out;
3756 
3757 	ret = -ENOMEM;
3758 	if (target->ch_count == 0) {
3759 		target->ch_count =
3760 			min(ch_count ?:
3761 				max(4 * num_online_nodes(),
3762 				    ibdev->num_comp_vectors),
3763 				num_online_cpus());
3764 	}
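
	/*
	 * Example of the heuristic above, with made-up numbers: on a system
	 * with 2 online NUMA nodes, 24 online CPUs and an HCA exposing 16
	 * completion vectors, and with neither the ch_count= option nor the
	 * ch_count module parameter set, this evaluates to
	 * min(max(4 * 2, 16), 24) = 16 channels.
	 */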
3765 
3766 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3767 			     GFP_KERNEL);
3768 	if (!target->ch)
3769 		goto out;
3770 
3771 	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3772 		ch = &target->ch[ch_idx];
3773 		ch->target = target;
3774 		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3775 		spin_lock_init(&ch->lock);
3776 		INIT_LIST_HEAD(&ch->free_tx);
3777 		ret = srp_new_cm_id(ch);
3778 		if (ret)
3779 			goto err_disconnect;
3780 
3781 		ret = srp_create_ch_ib(ch);
3782 		if (ret)
3783 			goto err_disconnect;
3784 
3785 		ret = srp_connect_ch(ch, max_iu_len, multich);
3786 		if (ret) {
3787 			char dst[64];
3788 
3789 			if (target->using_rdma_cm)
3790 				snprintf(dst, sizeof(dst), "%pIS",
3791 					&target->rdma_cm.dst);
3792 			else
3793 				snprintf(dst, sizeof(dst), "%pI6",
3794 					target->ib_cm.orig_dgid.raw);
3795 			shost_printk(KERN_ERR, target->scsi_host,
3796 				PFX "Connection %d/%d to %s failed\n",
3797 				ch_idx,
3798 				target->ch_count, dst);
3799 			if (ch_idx == 0) {
3800 				goto free_ch;
3801 			} else {
3802 				srp_free_ch_ib(target, ch);
3803 				target->ch_count = ch - target->ch;
3804 				goto connected;
3805 			}
3806 		}
3807 		multich = true;
3808 	}
3809 
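	/*
	 * Reaching this label means that either every channel connected or,
	 * after a partial failure, target->ch_count was reduced above to the
	 * number of channels that did connect; only a failure of channel 0
	 * aborts target creation entirely.
	 */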
3810 connected:
3811 	target->scsi_host->nr_hw_queues = target->ch_count;
3812 
3813 	ret = srp_add_target(host, target);
3814 	if (ret)
3815 		goto err_disconnect;
3816 
3817 	if (target->state != SRP_TARGET_REMOVED) {
3818 		if (target->using_rdma_cm) {
3819 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3820 				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3821 				     be64_to_cpu(target->id_ext),
3822 				     be64_to_cpu(target->ioc_guid),
3823 				     target->sgid.raw, &target->rdma_cm.dst);
3824 		} else {
3825 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3826 				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3827 				     be64_to_cpu(target->id_ext),
3828 				     be64_to_cpu(target->ioc_guid),
3829 				     be16_to_cpu(target->ib_cm.pkey),
3830 				     be64_to_cpu(target->ib_cm.service_id),
3831 				     target->sgid.raw,
3832 				     target->ib_cm.orig_dgid.raw);
3833 		}
3834 	}
3835 
3836 	ret = count;
3837 
3838 out:
3839 	mutex_unlock(&host->add_target_mutex);
3840 
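	/*
	 * Reference counting, as an interpretation of the code around this
	 * label rather than upstream wording: the put below balances the
	 * scsi_host_get() taken before the options were parsed; on failure,
	 * the additional put inside the ret < 0 branch drops the reference
	 * obtained from scsi_host_alloc() so that the SCSI host is freed.
	 */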
3841 put:
3842 	scsi_host_put(target->scsi_host);
3843 	if (ret < 0) {
3844 		/*
3845 		 * If a call to srp_remove_target() has not been scheduled,
3846 		 * drop the network namespace reference that was obtained
3847 		 * earlier in this function.
3848 		 */
3849 		if (target->state != SRP_TARGET_REMOVED)
3850 			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3851 		scsi_host_put(target->scsi_host);
3852 	}
3853 
3854 	return ret;
3855 
3856 err_disconnect:
3857 	srp_disconnect_target(target);
3858 
3859 free_ch:
3860 	for (i = 0; i < target->ch_count; i++) {
3861 		ch = &target->ch[i];
3862 		srp_free_ch_ib(target, ch);
3863 	}
3864 
3865 	kfree(target->ch);
3866 	goto out;
3867 }
3868 
3869 static DEVICE_ATTR_WO(add_target);
3870 
3871 static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
3872 			  char *buf)
3873 {
3874 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3875 
3876 	return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3877 }
3878 
3879 static DEVICE_ATTR_RO(ibdev);
3880 
3881 static ssize_t port_show(struct device *dev, struct device_attribute *attr,
3882 			 char *buf)
3883 {
3884 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3885 
3886 	return sysfs_emit(buf, "%d\n", host->port);
3887 }
3888 
3889 static DEVICE_ATTR_RO(port);
3890 
3891 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3892 {
3893 	struct srp_host *host;
3894 
3895 	host = kzalloc(sizeof *host, GFP_KERNEL);
3896 	if (!host)
3897 		return NULL;
3898 
3899 	INIT_LIST_HEAD(&host->target_list);
3900 	spin_lock_init(&host->target_lock);
3901 	init_completion(&host->released);
3902 	mutex_init(&host->add_target_mutex);
3903 	host->srp_dev = device;
3904 	host->port = port;
3905 
3906 	host->dev.class = &srp_class;
3907 	host->dev.parent = device->dev->dev.parent;
3908 	dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
3909 		     port);
3910 
3911 	if (device_register(&host->dev))
3912 		goto free_host;
3913 	if (device_create_file(&host->dev, &dev_attr_add_target))
3914 		goto err_class;
3915 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3916 		goto err_class;
3917 	if (device_create_file(&host->dev, &dev_attr_port))
3918 		goto err_class;
3919 
3920 	return host;
3921 
3922 err_class:
3923 	device_unregister(&host->dev);
3924 
3925 free_host:
3926 	kfree(host);
3927 
3928 	return NULL;
3929 }
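
/*
 * A successful srp_add_port() call results in a device named
 * "srp-<ibdev>-<port>" in the infiniband_srp class (registered in
 * srp_init_module() below) carrying the add_target, ibdev and port
 * attributes created above.  As an illustration only (the device name and
 * path are assumptions for an mlx5_0 HCA, port 1), user space, typically
 * srp_daemon, requests a new target port with something like:
 *
 *   echo "id_ext=...,ioc_guid=...,..." \
 *       > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target
 *
 * where the option string is the format accepted by srp_parse_options().
 */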
3930 
3931 static void srp_rename_dev(struct ib_device *device, void *client_data)
3932 {
3933 	struct srp_device *srp_dev = client_data;
3934 	struct srp_host *host, *tmp_host;
3935 
3936 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3937 		char name[IB_DEVICE_NAME_MAX + 8];
3938 
3939 		snprintf(name, sizeof(name), "srp-%s-%d",
3940 			 dev_name(&device->dev), host->port);
3941 		device_rename(&host->dev, name);
3942 	}
3943 }
3944 
3945 static int srp_add_one(struct ib_device *device)
3946 {
3947 	struct srp_device *srp_dev;
3948 	struct ib_device_attr *attr = &device->attrs;
3949 	struct srp_host *host;
3950 	int mr_page_shift;
3951 	unsigned int p;
3952 	u64 max_pages_per_mr;
3953 	unsigned int flags = 0;
3954 
3955 	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3956 	if (!srp_dev)
3957 		return -ENOMEM;
3958 
3959 	/*
3960 	 * Use the smallest page size supported by the HCA, down to a
3961 	 * minimum of 4096 bytes. We're unlikely to build large sglists
3962 	 * out of smaller entries.
3963 	 */
3964 	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
3965 	srp_dev->mr_page_size	= 1 << mr_page_shift;
3966 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
3967 	max_pages_per_mr	= attr->max_mr_size;
3968 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
3969 	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3970 		 attr->max_mr_size, srp_dev->mr_page_size,
3971 		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3972 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3973 					  max_pages_per_mr);
3974 
3975 	srp_dev->has_fr = (attr->device_cap_flags &
3976 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
3977 	if (!never_register && !srp_dev->has_fr)
3978 		dev_warn(&device->dev, "FR is not supported\n");
3979 	else if (!never_register &&
3980 		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
3981 		srp_dev->use_fast_reg = srp_dev->has_fr;
3982 
3983 	if (never_register || !register_always || !srp_dev->has_fr)
3984 		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3985 
3986 	if (srp_dev->use_fast_reg) {
3987 		srp_dev->max_pages_per_mr =
3988 			min_t(u32, srp_dev->max_pages_per_mr,
3989 			      attr->max_fast_reg_page_list_len);
3990 	}
3991 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
3992 				   srp_dev->max_pages_per_mr;
3993 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3994 		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
3995 		 attr->max_fast_reg_page_list_len,
3996 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
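
	/*
	 * Worked example with illustrative values: an HCA reporting
	 * page_size_cap = 0xfffff000 (4 KiB pages and larger) gives
	 * mr_page_shift = max(12, ffs(0xfffff000) - 1) = 12, i.e. a 4 KiB
	 * mr_page_size.  With max_mr_size = 4 GiB the division above yields
	 * 1048576 pages, which is then clamped to SRP_MAX_PAGES_PER_MR, and
	 * mr_max_size is simply mr_page_size * max_pages_per_mr.
	 */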
3997 
3998 	INIT_LIST_HEAD(&srp_dev->dev_list);
3999 
4000 	srp_dev->dev = device;
4001 	srp_dev->pd  = ib_alloc_pd(device, flags);
4002 	if (IS_ERR(srp_dev->pd)) {
4003 		int ret = PTR_ERR(srp_dev->pd);
4004 
4005 		kfree(srp_dev);
4006 		return ret;
4007 	}
4008 
4009 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4010 		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4011 		WARN_ON_ONCE(srp_dev->global_rkey == 0);
4012 	}
4013 
4014 	rdma_for_each_port (device, p) {
4015 		host = srp_add_port(srp_dev, p);
4016 		if (host)
4017 			list_add_tail(&host->list, &srp_dev->dev_list);
4018 	}
4019 
4020 	ib_set_client_data(device, &srp_client, srp_dev);
4021 	return 0;
4022 }
4023 
4024 static void srp_remove_one(struct ib_device *device, void *client_data)
4025 {
4026 	struct srp_device *srp_dev;
4027 	struct srp_host *host, *tmp_host;
4028 	struct srp_target_port *target;
4029 
4030 	srp_dev = client_data;
4031 
4032 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4033 		device_unregister(&host->dev);
4034 		/*
4035 		 * Wait for the sysfs entry to go away, so that no new
4036 		 * target ports can be created.
4037 		 */
4038 		wait_for_completion(&host->released);
4039 
4040 		/*
4041 		 * Remove all target ports.
4042 		 */
4043 		spin_lock(&host->target_lock);
4044 		list_for_each_entry(target, &host->target_list, list)
4045 			srp_queue_remove_work(target);
4046 		spin_unlock(&host->target_lock);
4047 
4048 		/*
4049 		 * srp_queue_remove_work() queues a call to
4050 		 * srp_remove_target(). The latter function cancels
4051 		 * target->tl_err_work, so waiting for the queued remove
4052 		 * work to finish is sufficient.
4053 		 */
4054 		flush_workqueue(srp_remove_wq);
4055 
4056 		kfree(host);
4057 	}
4058 
4059 	ib_dealloc_pd(srp_dev->pd);
4060 
4061 	kfree(srp_dev);
4062 }
4063 
4064 static struct srp_function_template ib_srp_transport_functions = {
4065 	.has_rport_state	 = true,
4066 	.reset_timer_if_blocked	 = true,
4067 	.reconnect_delay	 = &srp_reconnect_delay,
4068 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
4069 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
4070 	.reconnect		 = srp_rport_reconnect,
4071 	.rport_delete		 = srp_rport_delete,
4072 	.terminate_rport_io	 = srp_terminate_io,
4073 };
4074 
4075 static int __init srp_init_module(void)
4076 {
4077 	int ret;
4078 
4079 	BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
4080 	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4081 	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4082 	BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
4083 	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4084 	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4085 	BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
4086 
4087 	if (srp_sg_tablesize) {
4088 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4089 		if (!cmd_sg_entries)
4090 			cmd_sg_entries = srp_sg_tablesize;
4091 	}
4092 
4093 	if (!cmd_sg_entries)
4094 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4095 
4096 	if (cmd_sg_entries > 255) {
4097 		pr_warn("Clamping cmd_sg_entries to 255\n");
4098 		cmd_sg_entries = 255;
4099 	}
4100 
4101 	if (!indirect_sg_entries)
4102 		indirect_sg_entries = cmd_sg_entries;
4103 	else if (indirect_sg_entries < cmd_sg_entries) {
4104 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4105 			cmd_sg_entries);
4106 		indirect_sg_entries = cmd_sg_entries;
4107 	}
4108 
4109 	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4110 		pr_warn("Clamping indirect_sg_entries to %u\n",
4111 			SG_MAX_SEGMENTS);
4112 		indirect_sg_entries = SG_MAX_SEGMENTS;
4113 	}
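
	/*
	 * Example of the parameter policy above, with made-up values: loading
	 * the module with cmd_sg_entries=300 and indirect_sg_entries unset
	 * first clamps cmd_sg_entries to 255 and then sets
	 * indirect_sg_entries to the same value; passing only the deprecated
	 * srp_sg_tablesize=64 is treated as cmd_sg_entries=64.
	 */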
4114 
4115 	srp_remove_wq = create_workqueue("srp_remove");
4116 	if (!srp_remove_wq) {
4117 		ret = -ENOMEM;
4118 		goto out;
4119 	}
4120 
4121 	ret = -ENOMEM;
4122 	ib_srp_transport_template =
4123 		srp_attach_transport(&ib_srp_transport_functions);
4124 	if (!ib_srp_transport_template)
4125 		goto destroy_wq;
4126 
4127 	ret = class_register(&srp_class);
4128 	if (ret) {
4129 		pr_err("couldn't register class infiniband_srp\n");
4130 		goto release_tr;
4131 	}
4132 
4133 	ib_sa_register_client(&srp_sa_client);
4134 
4135 	ret = ib_register_client(&srp_client);
4136 	if (ret) {
4137 		pr_err("couldn't register IB client\n");
4138 		goto unreg_sa;
4139 	}
4140 
4141 out:
4142 	return ret;
4143 
4144 unreg_sa:
4145 	ib_sa_unregister_client(&srp_sa_client);
4146 	class_unregister(&srp_class);
4147 
4148 release_tr:
4149 	srp_release_transport(ib_srp_transport_template);
4150 
4151 destroy_wq:
4152 	destroy_workqueue(srp_remove_wq);
4153 	goto out;
4154 }
4155 
4156 static void __exit srp_cleanup_module(void)
4157 {
4158 	ib_unregister_client(&srp_client);
4159 	ib_sa_unregister_client(&srp_sa_client);
4160 	class_unregister(&srp_class);
4161 	srp_release_transport(ib_srp_transport_template);
4162 	destroy_workqueue(srp_remove_wq);
4163 }
4164 
4165 module_init(srp_init_module);
4166 module_exit(srp_cleanup_module);
4167