1 /*
2  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <rdma/ib_cache.h>
44 
45 #include <linux/atomic.h>
46 
47 #include <scsi/scsi.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_dbg.h>
50 #include <scsi/scsi_tcq.h>
51 #include <scsi/srp.h>
52 #include <scsi/scsi_transport_srp.h>
53 
54 #include "ib_srp.h"
55 
56 #define DRV_NAME	"ib_srp"
57 #define PFX		DRV_NAME ": "
58 #define DRV_VERSION	"2.0"
59 #define DRV_RELDATE	"July 26, 2015"
60 
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
64 MODULE_VERSION(DRV_VERSION);
65 MODULE_INFO(release_date, DRV_RELDATE);
66 
67 static unsigned int srp_sg_tablesize;
68 static unsigned int cmd_sg_entries;
69 static unsigned int indirect_sg_entries;
70 static bool allow_ext_sg;
71 static bool prefer_fr = true;
72 static bool register_always = true;
73 static int topspin_workarounds = 1;
74 
75 module_param(srp_sg_tablesize, uint, 0444);
76 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
77 
78 module_param(cmd_sg_entries, uint, 0444);
79 MODULE_PARM_DESC(cmd_sg_entries,
80 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
81 
82 module_param(indirect_sg_entries, uint, 0444);
83 MODULE_PARM_DESC(indirect_sg_entries,
84 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85 
86 module_param(allow_ext_sg, bool, 0444);
87 MODULE_PARM_DESC(allow_ext_sg,
88 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89 
90 module_param(topspin_workarounds, int, 0444);
91 MODULE_PARM_DESC(topspin_workarounds,
92 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93 
94 module_param(prefer_fr, bool, 0444);
95 MODULE_PARM_DESC(prefer_fr,
96 "Whether to use fast registration if both FMR and fast registration are supported");
97 
98 module_param(register_always, bool, 0444);
99 MODULE_PARM_DESC(register_always,
100 		 "Use memory registration even for contiguous memory regions");
101 
102 static const struct kernel_param_ops srp_tmo_ops;
103 
104 static int srp_reconnect_delay = 10;
105 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106 		S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108 
109 static int srp_fast_io_fail_tmo = 15;
110 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111 		S_IRUGO | S_IWUSR);
112 MODULE_PARM_DESC(fast_io_fail_tmo,
113 		 "Number of seconds between the observation of a transport"
114 		 " layer error and failing all I/O. \"off\" means that this"
115 		 " functionality is disabled.");
116 
117 static int srp_dev_loss_tmo = 600;
118 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119 		S_IRUGO | S_IWUSR);
120 MODULE_PARM_DESC(dev_loss_tmo,
121 		 "Maximum number of seconds that the SRP transport should"
122 		 " insulate transport layer errors. After this time has been"
123 		 " exceeded the SCSI host is removed. Should be"
124 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
126 		 " this functionality is disabled.");
127 
128 static unsigned ch_count;
129 module_param(ch_count, uint, 0444);
130 MODULE_PARM_DESC(ch_count,
131 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132 
133 static void srp_add_one(struct ib_device *device);
134 static void srp_remove_one(struct ib_device *device, void *client_data);
135 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
136 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
137 		const char *opname);
138 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
139 
140 static struct scsi_transport_template *ib_srp_transport_template;
141 static struct workqueue_struct *srp_remove_wq;
142 
143 static struct ib_client srp_client = {
144 	.name   = "srp",
145 	.add    = srp_add_one,
146 	.remove = srp_remove_one
147 };
148 
149 static struct ib_sa_client srp_sa_client;
150 
151 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
152 {
153 	int tmo = *(int *)kp->arg;
154 
155 	if (tmo >= 0)
156 		return sprintf(buffer, "%d", tmo);
157 	else
158 		return sprintf(buffer, "off");
159 }
160 
161 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
162 {
163 	int tmo, res;
164 
165 	res = srp_parse_tmo(&tmo, val);
166 	if (res)
167 		goto out;
168 
169 	if (kp->arg == &srp_reconnect_delay)
170 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
171 				    srp_dev_loss_tmo);
172 	else if (kp->arg == &srp_fast_io_fail_tmo)
173 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
174 	else
175 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
176 				    tmo);
177 	if (res)
178 		goto out;
179 	*(int *)kp->arg = tmo;
180 
181 out:
182 	return res;
183 }
184 
185 static const struct kernel_param_ops srp_tmo_ops = {
186 	.get = srp_tmo_get,
187 	.set = srp_tmo_set,
188 };
189 
190 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
191 {
192 	return (struct srp_target_port *) host->hostdata;
193 }
194 
195 static const char *srp_target_info(struct Scsi_Host *host)
196 {
197 	return host_to_target(host)->target_name;
198 }
199 
200 static int srp_target_is_topspin(struct srp_target_port *target)
201 {
202 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
203 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
204 
205 	return topspin_workarounds &&
206 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
207 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
208 }
209 
210 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
211 				   gfp_t gfp_mask,
212 				   enum dma_data_direction direction)
213 {
214 	struct srp_iu *iu;
215 
216 	iu = kmalloc(sizeof *iu, gfp_mask);
217 	if (!iu)
218 		goto out;
219 
220 	iu->buf = kzalloc(size, gfp_mask);
221 	if (!iu->buf)
222 		goto out_free_iu;
223 
224 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
225 				    direction);
226 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
227 		goto out_free_buf;
228 
229 	iu->size      = size;
230 	iu->direction = direction;
231 
232 	return iu;
233 
234 out_free_buf:
235 	kfree(iu->buf);
236 out_free_iu:
237 	kfree(iu);
238 out:
239 	return NULL;
240 }
241 
242 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
243 {
244 	if (!iu)
245 		return;
246 
247 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
248 			    iu->direction);
249 	kfree(iu->buf);
250 	kfree(iu);
251 }
252 
253 static void srp_qp_event(struct ib_event *event, void *context)
254 {
255 	pr_debug("QP event %s (%d)\n",
256 		 ib_event_msg(event->event), event->event);
257 }
258 
259 static int srp_init_qp(struct srp_target_port *target,
260 		       struct ib_qp *qp)
261 {
262 	struct ib_qp_attr *attr;
263 	int ret;
264 
265 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
266 	if (!attr)
267 		return -ENOMEM;
268 
269 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
270 				  target->srp_host->port,
271 				  be16_to_cpu(target->pkey),
272 				  &attr->pkey_index);
273 	if (ret)
274 		goto out;
275 
276 	attr->qp_state        = IB_QPS_INIT;
277 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
278 				    IB_ACCESS_REMOTE_WRITE);
279 	attr->port_num        = target->srp_host->port;
280 
281 	ret = ib_modify_qp(qp, attr,
282 			   IB_QP_STATE		|
283 			   IB_QP_PKEY_INDEX	|
284 			   IB_QP_ACCESS_FLAGS	|
285 			   IB_QP_PORT);
286 
287 out:
288 	kfree(attr);
289 	return ret;
290 }
291 
292 static int srp_new_cm_id(struct srp_rdma_ch *ch)
293 {
294 	struct srp_target_port *target = ch->target;
295 	struct ib_cm_id *new_cm_id;
296 
297 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
298 				    srp_cm_handler, ch);
299 	if (IS_ERR(new_cm_id))
300 		return PTR_ERR(new_cm_id);
301 
302 	if (ch->cm_id)
303 		ib_destroy_cm_id(ch->cm_id);
304 	ch->cm_id = new_cm_id;
305 	ch->path.sgid = target->sgid;
306 	ch->path.dgid = target->orig_dgid;
307 	ch->path.pkey = target->pkey;
308 	ch->path.service_id = target->service_id;
309 
310 	return 0;
311 }
312 
313 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
314 {
315 	struct srp_device *dev = target->srp_host->srp_dev;
316 	struct ib_fmr_pool_param fmr_param;
317 
318 	memset(&fmr_param, 0, sizeof(fmr_param));
319 	fmr_param.pool_size	    = target->scsi_host->can_queue;
320 	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
321 	fmr_param.cache		    = 1;
322 	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
323 	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
324 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
325 				       IB_ACCESS_REMOTE_WRITE |
326 				       IB_ACCESS_REMOTE_READ);
327 
328 	return ib_create_fmr_pool(dev->pd, &fmr_param);
329 }
330 
331 /**
332  * srp_destroy_fr_pool() - free the resources owned by a pool
333  * @pool: Fast registration pool to be destroyed.
334  */
335 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
336 {
337 	int i;
338 	struct srp_fr_desc *d;
339 
340 	if (!pool)
341 		return;
342 
343 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
344 		if (d->mr)
345 			ib_dereg_mr(d->mr);
346 	}
347 	kfree(pool);
348 }
349 
350 /**
351  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
352  * @device:            IB device to allocate fast registration descriptors for.
353  * @pd:                Protection domain associated with the FR descriptors.
354  * @pool_size:         Number of descriptors to allocate.
355  * @max_page_list_len: Maximum fast registration work request page list length.
356  */
357 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
358 					      struct ib_pd *pd, int pool_size,
359 					      int max_page_list_len)
360 {
361 	struct srp_fr_pool *pool;
362 	struct srp_fr_desc *d;
363 	struct ib_mr *mr;
364 	int i, ret = -EINVAL;
365 
366 	if (pool_size <= 0)
367 		goto err;
368 	ret = -ENOMEM;
369 	pool = kzalloc(sizeof(struct srp_fr_pool) +
370 		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
371 	if (!pool)
372 		goto err;
373 	pool->size = pool_size;
374 	pool->max_page_list_len = max_page_list_len;
375 	spin_lock_init(&pool->lock);
376 	INIT_LIST_HEAD(&pool->free_list);
377 
378 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
379 		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
380 				 max_page_list_len);
381 		if (IS_ERR(mr)) {
382 			ret = PTR_ERR(mr);
383 			goto destroy_pool;
384 		}
385 		d->mr = mr;
386 		list_add_tail(&d->entry, &pool->free_list);
387 	}
388 
389 out:
390 	return pool;
391 
392 destroy_pool:
393 	srp_destroy_fr_pool(pool);
394 
395 err:
396 	pool = ERR_PTR(ret);
397 	goto out;
398 }
399 
400 /**
401  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
402  * @pool: Pool to obtain descriptor from.
403  */
404 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
405 {
406 	struct srp_fr_desc *d = NULL;
407 	unsigned long flags;
408 
409 	spin_lock_irqsave(&pool->lock, flags);
410 	if (!list_empty(&pool->free_list)) {
411 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
412 		list_del(&d->entry);
413 	}
414 	spin_unlock_irqrestore(&pool->lock, flags);
415 
416 	return d;
417 }
418 
419 /**
420  * srp_fr_pool_put() - put an FR descriptor back in the free list
421  * @pool: Pool the descriptor was allocated from.
422  * @desc: Pointer to an array of fast registration descriptor pointers.
423  * @n:    Number of descriptors to put back.
424  *
425  * Note: The caller must already have queued an invalidation request for
426  * desc->mr->rkey before calling this function.
427  */
428 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
429 			    int n)
430 {
431 	unsigned long flags;
432 	int i;
433 
434 	spin_lock_irqsave(&pool->lock, flags);
435 	for (i = 0; i < n; i++)
436 		list_add(&desc[i]->entry, &pool->free_list);
437 	spin_unlock_irqrestore(&pool->lock, flags);
438 }
439 
440 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
441 {
442 	struct srp_device *dev = target->srp_host->srp_dev;
443 
444 	return srp_create_fr_pool(dev->dev, dev->pd,
445 				  target->scsi_host->can_queue,
446 				  dev->max_pages_per_mr);
447 }
448 
449 /**
450  * srp_destroy_qp() - destroy an RDMA queue pair
451  * @ch: SRP RDMA channel.
452  *
453  * Drain the qp before destroying it.  This avoids that the receive
454  * completion handler can access the queue pair while it is
455  * being destroyed.
456  */
457 static void srp_destroy_qp(struct srp_rdma_ch *ch)
458 {
459 	ib_drain_rq(ch->qp);
460 	ib_destroy_qp(ch->qp);
461 }
462 
463 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
464 {
465 	struct srp_target_port *target = ch->target;
466 	struct srp_device *dev = target->srp_host->srp_dev;
467 	struct ib_qp_init_attr *init_attr;
468 	struct ib_cq *recv_cq, *send_cq;
469 	struct ib_qp *qp;
470 	struct ib_fmr_pool *fmr_pool = NULL;
471 	struct srp_fr_pool *fr_pool = NULL;
472 	const int m = dev->use_fast_reg ? 3 : 1;
473 	int ret;
474 
475 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
476 	if (!init_attr)
477 		return -ENOMEM;
478 
479 	/* queue_size + 1 for ib_drain_rq() */
480 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
481 				ch->comp_vector, IB_POLL_SOFTIRQ);
482 	if (IS_ERR(recv_cq)) {
483 		ret = PTR_ERR(recv_cq);
484 		goto err;
485 	}
486 
487 	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
488 				ch->comp_vector, IB_POLL_DIRECT);
489 	if (IS_ERR(send_cq)) {
490 		ret = PTR_ERR(send_cq);
491 		goto err_recv_cq;
492 	}
493 
494 	init_attr->event_handler       = srp_qp_event;
495 	init_attr->cap.max_send_wr     = m * target->queue_size;
496 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
497 	init_attr->cap.max_recv_sge    = 1;
498 	init_attr->cap.max_send_sge    = 1;
499 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
500 	init_attr->qp_type             = IB_QPT_RC;
501 	init_attr->send_cq             = send_cq;
502 	init_attr->recv_cq             = recv_cq;
503 
504 	qp = ib_create_qp(dev->pd, init_attr);
505 	if (IS_ERR(qp)) {
506 		ret = PTR_ERR(qp);
507 		goto err_send_cq;
508 	}
509 
510 	ret = srp_init_qp(target, qp);
511 	if (ret)
512 		goto err_qp;
513 
514 	if (dev->use_fast_reg) {
515 		fr_pool = srp_alloc_fr_pool(target);
516 		if (IS_ERR(fr_pool)) {
517 			ret = PTR_ERR(fr_pool);
518 			shost_printk(KERN_WARNING, target->scsi_host, PFX
519 				     "FR pool allocation failed (%d)\n", ret);
520 			goto err_qp;
521 		}
522 	} else if (dev->use_fmr) {
523 		fmr_pool = srp_alloc_fmr_pool(target);
524 		if (IS_ERR(fmr_pool)) {
525 			ret = PTR_ERR(fmr_pool);
526 			shost_printk(KERN_WARNING, target->scsi_host, PFX
527 				     "FMR pool allocation failed (%d)\n", ret);
528 			goto err_qp;
529 		}
530 	}
531 
532 	if (ch->qp)
533 		srp_destroy_qp(ch);
534 	if (ch->recv_cq)
535 		ib_free_cq(ch->recv_cq);
536 	if (ch->send_cq)
537 		ib_free_cq(ch->send_cq);
538 
539 	ch->qp = qp;
540 	ch->recv_cq = recv_cq;
541 	ch->send_cq = send_cq;
542 
543 	if (dev->use_fast_reg) {
544 		if (ch->fr_pool)
545 			srp_destroy_fr_pool(ch->fr_pool);
546 		ch->fr_pool = fr_pool;
547 	} else if (dev->use_fmr) {
548 		if (ch->fmr_pool)
549 			ib_destroy_fmr_pool(ch->fmr_pool);
550 		ch->fmr_pool = fmr_pool;
551 	}
552 
553 	kfree(init_attr);
554 	return 0;
555 
556 err_qp:
557 	srp_destroy_qp(ch);
558 
559 err_send_cq:
560 	ib_free_cq(send_cq);
561 
562 err_recv_cq:
563 	ib_free_cq(recv_cq);
564 
565 err:
566 	kfree(init_attr);
567 	return ret;
568 }
569 
570 /*
571  * Note: this function may be called without srp_alloc_iu_bufs() having been
572  * invoked. Hence the ch->[rt]x_ring checks.
573  */
574 static void srp_free_ch_ib(struct srp_target_port *target,
575 			   struct srp_rdma_ch *ch)
576 {
577 	struct srp_device *dev = target->srp_host->srp_dev;
578 	int i;
579 
580 	if (!ch->target)
581 		return;
582 
583 	if (ch->cm_id) {
584 		ib_destroy_cm_id(ch->cm_id);
585 		ch->cm_id = NULL;
586 	}
587 
588 	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
589 	if (!ch->qp)
590 		return;
591 
592 	if (dev->use_fast_reg) {
593 		if (ch->fr_pool)
594 			srp_destroy_fr_pool(ch->fr_pool);
595 	} else if (dev->use_fmr) {
596 		if (ch->fmr_pool)
597 			ib_destroy_fmr_pool(ch->fmr_pool);
598 	}
599 
600 	srp_destroy_qp(ch);
601 	ib_free_cq(ch->send_cq);
602 	ib_free_cq(ch->recv_cq);
603 
604 	/*
605 	 * Avoid that the SCSI error handler tries to use this channel after
606 	 * it has been freed. The SCSI error handler can namely continue
607 	 * trying to perform recovery actions after scsi_remove_host()
608 	 * returned.
609 	 */
610 	ch->target = NULL;
611 
612 	ch->qp = NULL;
613 	ch->send_cq = ch->recv_cq = NULL;
614 
615 	if (ch->rx_ring) {
616 		for (i = 0; i < target->queue_size; ++i)
617 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
618 		kfree(ch->rx_ring);
619 		ch->rx_ring = NULL;
620 	}
621 	if (ch->tx_ring) {
622 		for (i = 0; i < target->queue_size; ++i)
623 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
624 		kfree(ch->tx_ring);
625 		ch->tx_ring = NULL;
626 	}
627 }
628 
629 static void srp_path_rec_completion(int status,
630 				    struct ib_sa_path_rec *pathrec,
631 				    void *ch_ptr)
632 {
633 	struct srp_rdma_ch *ch = ch_ptr;
634 	struct srp_target_port *target = ch->target;
635 
636 	ch->status = status;
637 	if (status)
638 		shost_printk(KERN_ERR, target->scsi_host,
639 			     PFX "Got failed path rec status %d\n", status);
640 	else
641 		ch->path = *pathrec;
642 	complete(&ch->done);
643 }
644 
645 static int srp_lookup_path(struct srp_rdma_ch *ch)
646 {
647 	struct srp_target_port *target = ch->target;
648 	int ret;
649 
650 	ch->path.numb_path = 1;
651 
652 	init_completion(&ch->done);
653 
654 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
655 					       target->srp_host->srp_dev->dev,
656 					       target->srp_host->port,
657 					       &ch->path,
658 					       IB_SA_PATH_REC_SERVICE_ID |
659 					       IB_SA_PATH_REC_DGID	 |
660 					       IB_SA_PATH_REC_SGID	 |
661 					       IB_SA_PATH_REC_NUMB_PATH	 |
662 					       IB_SA_PATH_REC_PKEY,
663 					       SRP_PATH_REC_TIMEOUT_MS,
664 					       GFP_KERNEL,
665 					       srp_path_rec_completion,
666 					       ch, &ch->path_query);
667 	if (ch->path_query_id < 0)
668 		return ch->path_query_id;
669 
670 	ret = wait_for_completion_interruptible(&ch->done);
671 	if (ret < 0)
672 		return ret;
673 
674 	if (ch->status < 0)
675 		shost_printk(KERN_WARNING, target->scsi_host,
676 			     PFX "Path record query failed\n");
677 
678 	return ch->status;
679 }
680 
681 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
682 {
683 	struct srp_target_port *target = ch->target;
684 	struct {
685 		struct ib_cm_req_param param;
686 		struct srp_login_req   priv;
687 	} *req = NULL;
688 	int status;
689 
690 	req = kzalloc(sizeof *req, GFP_KERNEL);
691 	if (!req)
692 		return -ENOMEM;
693 
694 	req->param.primary_path		      = &ch->path;
695 	req->param.alternate_path 	      = NULL;
696 	req->param.service_id 		      = target->service_id;
697 	req->param.qp_num		      = ch->qp->qp_num;
698 	req->param.qp_type		      = ch->qp->qp_type;
699 	req->param.private_data 	      = &req->priv;
700 	req->param.private_data_len 	      = sizeof req->priv;
701 	req->param.flow_control 	      = 1;
702 
703 	get_random_bytes(&req->param.starting_psn, 4);
704 	req->param.starting_psn 	     &= 0xffffff;
705 
706 	/*
707 	 * Pick some arbitrary defaults here; we could make these
708 	 * module parameters if anyone cared about setting them.
709 	 */
710 	req->param.responder_resources	      = 4;
711 	req->param.remote_cm_response_timeout = 20;
712 	req->param.local_cm_response_timeout  = 20;
713 	req->param.retry_count                = target->tl_retry_count;
714 	req->param.rnr_retry_count 	      = 7;
715 	req->param.max_cm_retries 	      = 15;
716 
717 	req->priv.opcode     	= SRP_LOGIN_REQ;
718 	req->priv.tag        	= 0;
719 	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
720 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
721 					      SRP_BUF_FORMAT_INDIRECT);
722 	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
723 				   SRP_MULTICHAN_SINGLE);
724 	/*
725 	 * In the published SRP specification (draft rev. 16a), the
726 	 * port identifier format is 8 bytes of ID extension followed
727 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
728 	 * opposite order, so that the GUID comes first.
729 	 *
730 	 * Targets conforming to these obsolete drafts can be
731 	 * recognized by the I/O Class they report.
732 	 */
733 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
734 		memcpy(req->priv.initiator_port_id,
735 		       &target->sgid.global.interface_id, 8);
736 		memcpy(req->priv.initiator_port_id + 8,
737 		       &target->initiator_ext, 8);
738 		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
739 		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
740 	} else {
741 		memcpy(req->priv.initiator_port_id,
742 		       &target->initiator_ext, 8);
743 		memcpy(req->priv.initiator_port_id + 8,
744 		       &target->sgid.global.interface_id, 8);
745 		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
746 		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
747 	}
748 
749 	/*
750 	 * Topspin/Cisco SRP targets will reject our login unless we
751 	 * zero out the first 8 bytes of our initiator port ID and set
752 	 * the second 8 bytes to the local node GUID.
753 	 */
754 	if (srp_target_is_topspin(target)) {
755 		shost_printk(KERN_DEBUG, target->scsi_host,
756 			     PFX "Topspin/Cisco initiator port ID workaround "
757 			     "activated for target GUID %016llx\n",
758 			     be64_to_cpu(target->ioc_guid));
759 		memset(req->priv.initiator_port_id, 0, 8);
760 		memcpy(req->priv.initiator_port_id + 8,
761 		       &target->srp_host->srp_dev->dev->node_guid, 8);
762 	}
763 
764 	status = ib_send_cm_req(ch->cm_id, &req->param);
765 
766 	kfree(req);
767 
768 	return status;
769 }
770 
771 static bool srp_queue_remove_work(struct srp_target_port *target)
772 {
773 	bool changed = false;
774 
775 	spin_lock_irq(&target->lock);
776 	if (target->state != SRP_TARGET_REMOVED) {
777 		target->state = SRP_TARGET_REMOVED;
778 		changed = true;
779 	}
780 	spin_unlock_irq(&target->lock);
781 
782 	if (changed)
783 		queue_work(srp_remove_wq, &target->remove_work);
784 
785 	return changed;
786 }
787 
788 static void srp_disconnect_target(struct srp_target_port *target)
789 {
790 	struct srp_rdma_ch *ch;
791 	int i;
792 
793 	/* XXX should send SRP_I_LOGOUT request */
794 
795 	for (i = 0; i < target->ch_count; i++) {
796 		ch = &target->ch[i];
797 		ch->connected = false;
798 		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
799 			shost_printk(KERN_DEBUG, target->scsi_host,
800 				     PFX "Sending CM DREQ failed\n");
801 		}
802 	}
803 }
804 
805 static void srp_free_req_data(struct srp_target_port *target,
806 			      struct srp_rdma_ch *ch)
807 {
808 	struct srp_device *dev = target->srp_host->srp_dev;
809 	struct ib_device *ibdev = dev->dev;
810 	struct srp_request *req;
811 	int i;
812 
813 	if (!ch->req_ring)
814 		return;
815 
816 	for (i = 0; i < target->req_ring_size; ++i) {
817 		req = &ch->req_ring[i];
818 		if (dev->use_fast_reg) {
819 			kfree(req->fr_list);
820 		} else {
821 			kfree(req->fmr_list);
822 			kfree(req->map_page);
823 		}
824 		if (req->indirect_dma_addr) {
825 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
826 					    target->indirect_size,
827 					    DMA_TO_DEVICE);
828 		}
829 		kfree(req->indirect_desc);
830 	}
831 
832 	kfree(ch->req_ring);
833 	ch->req_ring = NULL;
834 }
835 
836 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
837 {
838 	struct srp_target_port *target = ch->target;
839 	struct srp_device *srp_dev = target->srp_host->srp_dev;
840 	struct ib_device *ibdev = srp_dev->dev;
841 	struct srp_request *req;
842 	void *mr_list;
843 	dma_addr_t dma_addr;
844 	int i, ret = -ENOMEM;
845 
846 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
847 			       GFP_KERNEL);
848 	if (!ch->req_ring)
849 		goto out;
850 
851 	for (i = 0; i < target->req_ring_size; ++i) {
852 		req = &ch->req_ring[i];
853 		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
854 				  GFP_KERNEL);
855 		if (!mr_list)
856 			goto out;
857 		if (srp_dev->use_fast_reg) {
858 			req->fr_list = mr_list;
859 		} else {
860 			req->fmr_list = mr_list;
861 			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
862 						sizeof(void *), GFP_KERNEL);
863 			if (!req->map_page)
864 				goto out;
865 		}
866 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
867 		if (!req->indirect_desc)
868 			goto out;
869 
870 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
871 					     target->indirect_size,
872 					     DMA_TO_DEVICE);
873 		if (ib_dma_mapping_error(ibdev, dma_addr))
874 			goto out;
875 
876 		req->indirect_dma_addr = dma_addr;
877 	}
878 	ret = 0;
879 
880 out:
881 	return ret;
882 }
883 
884 /**
885  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
886  * @shost: SCSI host whose attributes to remove from sysfs.
887  *
888  * Note: Any attributes defined in the host template and that did not exist
889  * before invocation of this function will be ignored.
890  */
891 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
892 {
893 	struct device_attribute **attr;
894 
895 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
896 		device_remove_file(&shost->shost_dev, *attr);
897 }
898 
899 static void srp_remove_target(struct srp_target_port *target)
900 {
901 	struct srp_rdma_ch *ch;
902 	int i;
903 
904 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
905 
906 	srp_del_scsi_host_attr(target->scsi_host);
907 	srp_rport_get(target->rport);
908 	srp_remove_host(target->scsi_host);
909 	scsi_remove_host(target->scsi_host);
910 	srp_stop_rport_timers(target->rport);
911 	srp_disconnect_target(target);
912 	for (i = 0; i < target->ch_count; i++) {
913 		ch = &target->ch[i];
914 		srp_free_ch_ib(target, ch);
915 	}
916 	cancel_work_sync(&target->tl_err_work);
917 	srp_rport_put(target->rport);
918 	for (i = 0; i < target->ch_count; i++) {
919 		ch = &target->ch[i];
920 		srp_free_req_data(target, ch);
921 	}
922 	kfree(target->ch);
923 	target->ch = NULL;
924 
925 	spin_lock(&target->srp_host->target_lock);
926 	list_del(&target->list);
927 	spin_unlock(&target->srp_host->target_lock);
928 
929 	scsi_host_put(target->scsi_host);
930 }
931 
932 static void srp_remove_work(struct work_struct *work)
933 {
934 	struct srp_target_port *target =
935 		container_of(work, struct srp_target_port, remove_work);
936 
937 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
938 
939 	srp_remove_target(target);
940 }
941 
942 static void srp_rport_delete(struct srp_rport *rport)
943 {
944 	struct srp_target_port *target = rport->lld_data;
945 
946 	srp_queue_remove_work(target);
947 }
948 
949 /**
950  * srp_connected_ch() - number of connected channels
951  * @target: SRP target port.
952  */
953 static int srp_connected_ch(struct srp_target_port *target)
954 {
955 	int i, c = 0;
956 
957 	for (i = 0; i < target->ch_count; i++)
958 		c += target->ch[i].connected;
959 
960 	return c;
961 }
962 
963 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
964 {
965 	struct srp_target_port *target = ch->target;
966 	int ret;
967 
968 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
969 
970 	ret = srp_lookup_path(ch);
971 	if (ret)
972 		goto out;
973 
974 	while (1) {
975 		init_completion(&ch->done);
976 		ret = srp_send_req(ch, multich);
977 		if (ret)
978 			goto out;
979 		ret = wait_for_completion_interruptible(&ch->done);
980 		if (ret < 0)
981 			goto out;
982 
983 		/*
984 		 * The CM event handling code will set status to
985 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
986 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
987 		 * redirect REJ back.
988 		 */
989 		ret = ch->status;
990 		switch (ret) {
991 		case 0:
992 			ch->connected = true;
993 			goto out;
994 
995 		case SRP_PORT_REDIRECT:
996 			ret = srp_lookup_path(ch);
997 			if (ret)
998 				goto out;
999 			break;
1000 
1001 		case SRP_DLID_REDIRECT:
1002 			break;
1003 
1004 		case SRP_STALE_CONN:
1005 			shost_printk(KERN_ERR, target->scsi_host, PFX
1006 				     "giving up on stale connection\n");
1007 			ret = -ECONNRESET;
1008 			goto out;
1009 
1010 		default:
1011 			goto out;
1012 		}
1013 	}
1014 
1015 out:
1016 	return ret <= 0 ? ret : -ENODEV;
1017 }
1018 
1019 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1020 {
1021 	srp_handle_qp_err(cq, wc, "INV RKEY");
1022 }
1023 
1024 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1025 		u32 rkey)
1026 {
1027 	struct ib_send_wr *bad_wr;
1028 	struct ib_send_wr wr = {
1029 		.opcode		    = IB_WR_LOCAL_INV,
1030 		.next		    = NULL,
1031 		.num_sge	    = 0,
1032 		.send_flags	    = 0,
1033 		.ex.invalidate_rkey = rkey,
1034 	};
1035 
1036 	wr.wr_cqe = &req->reg_cqe;
1037 	req->reg_cqe.done = srp_inv_rkey_err_done;
1038 	return ib_post_send(ch->qp, &wr, &bad_wr);
1039 }
1040 
1041 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1042 			   struct srp_rdma_ch *ch,
1043 			   struct srp_request *req)
1044 {
1045 	struct srp_target_port *target = ch->target;
1046 	struct srp_device *dev = target->srp_host->srp_dev;
1047 	struct ib_device *ibdev = dev->dev;
1048 	int i, res;
1049 
1050 	if (!scsi_sglist(scmnd) ||
1051 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1052 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1053 		return;
1054 
1055 	if (dev->use_fast_reg) {
1056 		struct srp_fr_desc **pfr;
1057 
1058 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1059 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1060 			if (res < 0) {
1061 				shost_printk(KERN_ERR, target->scsi_host, PFX
1062 				  "Queueing INV WR for rkey %#x failed (%d)\n",
1063 				  (*pfr)->mr->rkey, res);
1064 				queue_work(system_long_wq,
1065 					   &target->tl_err_work);
1066 			}
1067 		}
1068 		if (req->nmdesc)
1069 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
1070 					req->nmdesc);
1071 	} else if (dev->use_fmr) {
1072 		struct ib_pool_fmr **pfmr;
1073 
1074 		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1075 			ib_fmr_pool_unmap(*pfmr);
1076 	}
1077 
1078 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1079 			scmnd->sc_data_direction);
1080 }
1081 
1082 /**
1083  * srp_claim_req - Take ownership of the scmnd associated with a request.
1084  * @ch: SRP RDMA channel.
1085  * @req: SRP request.
1086  * @sdev: If not NULL, only take ownership for this SCSI device.
1087  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1088  *         ownership of @req->scmnd if it equals @scmnd.
1089  *
1090  * Return value:
1091  * Either NULL or a pointer to the SCSI command the caller became owner of.
1092  */
1093 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1094 				       struct srp_request *req,
1095 				       struct scsi_device *sdev,
1096 				       struct scsi_cmnd *scmnd)
1097 {
1098 	unsigned long flags;
1099 
1100 	spin_lock_irqsave(&ch->lock, flags);
1101 	if (req->scmnd &&
1102 	    (!sdev || req->scmnd->device == sdev) &&
1103 	    (!scmnd || req->scmnd == scmnd)) {
1104 		scmnd = req->scmnd;
1105 		req->scmnd = NULL;
1106 	} else {
1107 		scmnd = NULL;
1108 	}
1109 	spin_unlock_irqrestore(&ch->lock, flags);
1110 
1111 	return scmnd;
1112 }
1113 
1114 /**
1115  * srp_free_req() - Unmap data and add request to the free request list.
1116  * @ch:     SRP RDMA channel.
1117  * @req:    Request to be freed.
1118  * @scmnd:  SCSI command associated with @req.
1119  * @req_lim_delta: Amount to be added to @target->req_lim.
1120  */
1121 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1122 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1123 {
1124 	unsigned long flags;
1125 
1126 	srp_unmap_data(scmnd, ch, req);
1127 
1128 	spin_lock_irqsave(&ch->lock, flags);
1129 	ch->req_lim += req_lim_delta;
1130 	spin_unlock_irqrestore(&ch->lock, flags);
1131 }
1132 
1133 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1134 			   struct scsi_device *sdev, int result)
1135 {
1136 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1137 
1138 	if (scmnd) {
1139 		srp_free_req(ch, req, scmnd, 0);
1140 		scmnd->result = result;
1141 		scmnd->scsi_done(scmnd);
1142 	}
1143 }
1144 
1145 static void srp_terminate_io(struct srp_rport *rport)
1146 {
1147 	struct srp_target_port *target = rport->lld_data;
1148 	struct srp_rdma_ch *ch;
1149 	struct Scsi_Host *shost = target->scsi_host;
1150 	struct scsi_device *sdev;
1151 	int i, j;
1152 
1153 	/*
1154 	 * Invoking srp_terminate_io() while srp_queuecommand() is running
1155 	 * is not safe. Hence the warning statement below.
1156 	 */
1157 	shost_for_each_device(sdev, shost)
1158 		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1159 
1160 	for (i = 0; i < target->ch_count; i++) {
1161 		ch = &target->ch[i];
1162 
1163 		for (j = 0; j < target->req_ring_size; ++j) {
1164 			struct srp_request *req = &ch->req_ring[j];
1165 
1166 			srp_finish_req(ch, req, NULL,
1167 				       DID_TRANSPORT_FAILFAST << 16);
1168 		}
1169 	}
1170 }
1171 
1172 /*
1173  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1174  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1175  * srp_reset_device() or srp_reset_host() calls will occur while this function
1176  * is in progress. One way to realize that is not to call this function
1177  * directly but to call srp_reconnect_rport() instead since that last function
1178  * serializes calls of this function via rport->mutex and also blocks
1179  * srp_queuecommand() calls before invoking this function.
1180  */
1181 static int srp_rport_reconnect(struct srp_rport *rport)
1182 {
1183 	struct srp_target_port *target = rport->lld_data;
1184 	struct srp_rdma_ch *ch;
1185 	int i, j, ret = 0;
1186 	bool multich = false;
1187 
1188 	srp_disconnect_target(target);
1189 
1190 	if (target->state == SRP_TARGET_SCANNING)
1191 		return -ENODEV;
1192 
1193 	/*
1194 	 * Now get a new local CM ID so that we avoid confusing the target in
1195 	 * case things are really fouled up. Doing so also ensures that all CM
1196 	 * callbacks will have finished before a new QP is allocated.
1197 	 */
1198 	for (i = 0; i < target->ch_count; i++) {
1199 		ch = &target->ch[i];
1200 		ret += srp_new_cm_id(ch);
1201 	}
1202 	for (i = 0; i < target->ch_count; i++) {
1203 		ch = &target->ch[i];
1204 		for (j = 0; j < target->req_ring_size; ++j) {
1205 			struct srp_request *req = &ch->req_ring[j];
1206 
1207 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1208 		}
1209 	}
1210 	for (i = 0; i < target->ch_count; i++) {
1211 		ch = &target->ch[i];
1212 		/*
1213 		 * Whether or not creating a new CM ID succeeded, create a new
1214 		 * QP. This guarantees that all completion callback function
1215 		 * invocations have finished before request resetting starts.
1216 		 */
1217 		ret += srp_create_ch_ib(ch);
1218 
1219 		INIT_LIST_HEAD(&ch->free_tx);
1220 		for (j = 0; j < target->queue_size; ++j)
1221 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1222 	}
1223 
1224 	target->qp_in_error = false;
1225 
1226 	for (i = 0; i < target->ch_count; i++) {
1227 		ch = &target->ch[i];
1228 		if (ret)
1229 			break;
1230 		ret = srp_connect_ch(ch, multich);
1231 		multich = true;
1232 	}
1233 
1234 	if (ret == 0)
1235 		shost_printk(KERN_INFO, target->scsi_host,
1236 			     PFX "reconnect succeeded\n");
1237 
1238 	return ret;
1239 }
1240 
1241 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1242 			 unsigned int dma_len, u32 rkey)
1243 {
1244 	struct srp_direct_buf *desc = state->desc;
1245 
1246 	WARN_ON_ONCE(!dma_len);
1247 
1248 	desc->va = cpu_to_be64(dma_addr);
1249 	desc->key = cpu_to_be32(rkey);
1250 	desc->len = cpu_to_be32(dma_len);
1251 
1252 	state->total_len += dma_len;
1253 	state->desc++;
1254 	state->ndesc++;
1255 }
1256 
1257 static int srp_map_finish_fmr(struct srp_map_state *state,
1258 			      struct srp_rdma_ch *ch)
1259 {
1260 	struct srp_target_port *target = ch->target;
1261 	struct srp_device *dev = target->srp_host->srp_dev;
1262 	struct ib_pool_fmr *fmr;
1263 	u64 io_addr = 0;
1264 
1265 	if (state->fmr.next >= state->fmr.end)
1266 		return -ENOMEM;
1267 
1268 	WARN_ON_ONCE(!dev->use_fmr);
1269 
1270 	if (state->npages == 0)
1271 		return 0;
1272 
1273 	if (state->npages == 1 && target->global_mr) {
1274 		srp_map_desc(state, state->base_dma_addr, state->dma_len,
1275 			     target->global_mr->rkey);
1276 		goto reset_state;
1277 	}
1278 
1279 	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1280 				   state->npages, io_addr);
1281 	if (IS_ERR(fmr))
1282 		return PTR_ERR(fmr);
1283 
1284 	*state->fmr.next++ = fmr;
1285 	state->nmdesc++;
1286 
1287 	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1288 		     state->dma_len, fmr->fmr->rkey);
1289 
1290 reset_state:
1291 	state->npages = 0;
1292 	state->dma_len = 0;
1293 
1294 	return 0;
1295 }
1296 
1297 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1298 {
1299 	srp_handle_qp_err(cq, wc, "FAST REG");
1300 }
1301 
1302 static int srp_map_finish_fr(struct srp_map_state *state,
1303 			     struct srp_request *req,
1304 			     struct srp_rdma_ch *ch, int sg_nents)
1305 {
1306 	struct srp_target_port *target = ch->target;
1307 	struct srp_device *dev = target->srp_host->srp_dev;
1308 	struct ib_send_wr *bad_wr;
1309 	struct ib_reg_wr wr;
1310 	struct srp_fr_desc *desc;
1311 	u32 rkey;
1312 	int n, err;
1313 
1314 	if (state->fr.next >= state->fr.end)
1315 		return -ENOMEM;
1316 
1317 	WARN_ON_ONCE(!dev->use_fast_reg);
1318 
1319 	if (sg_nents == 0)
1320 		return 0;
1321 
1322 	if (sg_nents == 1 && target->global_mr) {
1323 		srp_map_desc(state, sg_dma_address(state->sg),
1324 			     sg_dma_len(state->sg),
1325 			     target->global_mr->rkey);
1326 		return 1;
1327 	}
1328 
1329 	desc = srp_fr_pool_get(ch->fr_pool);
1330 	if (!desc)
1331 		return -ENOMEM;
1332 
1333 	rkey = ib_inc_rkey(desc->mr->rkey);
1334 	ib_update_fast_reg_key(desc->mr, rkey);
1335 
1336 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
1337 	if (unlikely(n < 0))
1338 		return n;
1339 
1340 	req->reg_cqe.done = srp_reg_mr_err_done;
1341 
1342 	wr.wr.next = NULL;
1343 	wr.wr.opcode = IB_WR_REG_MR;
1344 	wr.wr.wr_cqe = &req->reg_cqe;
1345 	wr.wr.num_sge = 0;
1346 	wr.wr.send_flags = 0;
1347 	wr.mr = desc->mr;
1348 	wr.key = desc->mr->rkey;
1349 	wr.access = (IB_ACCESS_LOCAL_WRITE |
1350 		     IB_ACCESS_REMOTE_READ |
1351 		     IB_ACCESS_REMOTE_WRITE);
1352 
1353 	*state->fr.next++ = desc;
1354 	state->nmdesc++;
1355 
1356 	srp_map_desc(state, desc->mr->iova,
1357 		     desc->mr->length, desc->mr->rkey);
1358 
1359 	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
1360 	if (unlikely(err))
1361 		return err;
1362 
1363 	return n;
1364 }
1365 
1366 static int srp_map_sg_entry(struct srp_map_state *state,
1367 			    struct srp_rdma_ch *ch,
1368 			    struct scatterlist *sg, int sg_index)
1369 {
1370 	struct srp_target_port *target = ch->target;
1371 	struct srp_device *dev = target->srp_host->srp_dev;
1372 	struct ib_device *ibdev = dev->dev;
1373 	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1374 	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1375 	unsigned int len = 0;
1376 	int ret;
1377 
1378 	WARN_ON_ONCE(!dma_len);
1379 
1380 	while (dma_len) {
1381 		unsigned offset = dma_addr & ~dev->mr_page_mask;
1382 		if (state->npages == dev->max_pages_per_mr || offset != 0) {
1383 			ret = srp_map_finish_fmr(state, ch);
1384 			if (ret)
1385 				return ret;
1386 		}
1387 
1388 		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1389 
1390 		if (!state->npages)
1391 			state->base_dma_addr = dma_addr;
1392 		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1393 		state->dma_len += len;
1394 		dma_addr += len;
1395 		dma_len -= len;
1396 	}
1397 
1398 	/*
1399 	 * If the last entry of the MR wasn't a full page, then we need to
1400 	 * close it out and start a new one -- we can only merge at page
1401 	 * boundries.
1402 	 */
1403 	ret = 0;
1404 	if (len != dev->mr_page_size)
1405 		ret = srp_map_finish_fmr(state, ch);
1406 	return ret;
1407 }
1408 
1409 static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1410 			  struct srp_request *req, struct scatterlist *scat,
1411 			  int count)
1412 {
1413 	struct scatterlist *sg;
1414 	int i, ret;
1415 
1416 	state->desc = req->indirect_desc;
1417 	state->pages = req->map_page;
1418 	state->fmr.next = req->fmr_list;
1419 	state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1420 
1421 	for_each_sg(scat, sg, count, i) {
1422 		ret = srp_map_sg_entry(state, ch, sg, i);
1423 		if (ret)
1424 			return ret;
1425 	}
1426 
1427 	ret = srp_map_finish_fmr(state, ch);
1428 	if (ret)
1429 		return ret;
1430 
1431 	req->nmdesc = state->nmdesc;
1432 
1433 	return 0;
1434 }
1435 
1436 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1437 			 struct srp_request *req, struct scatterlist *scat,
1438 			 int count)
1439 {
1440 	state->desc = req->indirect_desc;
1441 	state->fr.next = req->fr_list;
1442 	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1443 	state->sg = scat;
1444 
1445 	while (count) {
1446 		int i, n;
1447 
1448 		n = srp_map_finish_fr(state, req, ch, count);
1449 		if (unlikely(n < 0))
1450 			return n;
1451 
1452 		count -= n;
1453 		for (i = 0; i < n; i++)
1454 			state->sg = sg_next(state->sg);
1455 	}
1456 
1457 	req->nmdesc = state->nmdesc;
1458 
1459 	return 0;
1460 }
1461 
1462 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1463 			  struct srp_request *req, struct scatterlist *scat,
1464 			  int count)
1465 {
1466 	struct srp_target_port *target = ch->target;
1467 	struct srp_device *dev = target->srp_host->srp_dev;
1468 	struct scatterlist *sg;
1469 	int i;
1470 
1471 	state->desc = req->indirect_desc;
1472 	for_each_sg(scat, sg, count, i) {
1473 		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1474 			     ib_sg_dma_len(dev->dev, sg),
1475 			     target->global_mr->rkey);
1476 	}
1477 
1478 	req->nmdesc = state->nmdesc;
1479 
1480 	return 0;
1481 }
1482 
1483 /*
1484  * Register the indirect data buffer descriptor with the HCA.
1485  *
1486  * Note: since the indirect data buffer descriptor has been allocated with
1487  * kmalloc() it is guaranteed that this buffer is a physically contiguous
1488  * memory buffer.
1489  */
1490 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1491 		       void **next_mr, void **end_mr, u32 idb_len,
1492 		       __be32 *idb_rkey)
1493 {
1494 	struct srp_target_port *target = ch->target;
1495 	struct srp_device *dev = target->srp_host->srp_dev;
1496 	struct srp_map_state state;
1497 	struct srp_direct_buf idb_desc;
1498 	u64 idb_pages[1];
1499 	struct scatterlist idb_sg[1];
1500 	int ret;
1501 
1502 	memset(&state, 0, sizeof(state));
1503 	memset(&idb_desc, 0, sizeof(idb_desc));
1504 	state.gen.next = next_mr;
1505 	state.gen.end = end_mr;
1506 	state.desc = &idb_desc;
1507 	state.base_dma_addr = req->indirect_dma_addr;
1508 	state.dma_len = idb_len;
1509 
1510 	if (dev->use_fast_reg) {
1511 		state.sg = idb_sg;
1512 		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1513 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1514 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1515 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1516 #endif
1517 		ret = srp_map_finish_fr(&state, req, ch, 1);
1518 		if (ret < 0)
1519 			return ret;
1520 	} else if (dev->use_fmr) {
1521 		state.pages = idb_pages;
1522 		state.pages[0] = (req->indirect_dma_addr &
1523 				  dev->mr_page_mask);
1524 		state.npages = 1;
1525 		ret = srp_map_finish_fmr(&state, ch);
1526 		if (ret < 0)
1527 			return ret;
1528 	} else {
1529 		return -EINVAL;
1530 	}
1531 
1532 	*idb_rkey = idb_desc.key;
1533 
1534 	return 0;
1535 }
1536 
1537 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1538 			struct srp_request *req)
1539 {
1540 	struct srp_target_port *target = ch->target;
1541 	struct scatterlist *scat;
1542 	struct srp_cmd *cmd = req->cmd->buf;
1543 	int len, nents, count, ret;
1544 	struct srp_device *dev;
1545 	struct ib_device *ibdev;
1546 	struct srp_map_state state;
1547 	struct srp_indirect_buf *indirect_hdr;
1548 	u32 idb_len, table_len;
1549 	__be32 idb_rkey;
1550 	u8 fmt;
1551 
1552 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1553 		return sizeof (struct srp_cmd);
1554 
1555 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1556 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
1557 		shost_printk(KERN_WARNING, target->scsi_host,
1558 			     PFX "Unhandled data direction %d\n",
1559 			     scmnd->sc_data_direction);
1560 		return -EINVAL;
1561 	}
1562 
1563 	nents = scsi_sg_count(scmnd);
1564 	scat  = scsi_sglist(scmnd);
1565 
1566 	dev = target->srp_host->srp_dev;
1567 	ibdev = dev->dev;
1568 
1569 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1570 	if (unlikely(count == 0))
1571 		return -EIO;
1572 
1573 	fmt = SRP_DATA_DESC_DIRECT;
1574 	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
1575 
1576 	if (count == 1 && target->global_mr) {
1577 		/*
1578 		 * The midlayer only generated a single gather/scatter
1579 		 * entry, or DMA mapping coalesced everything to a
1580 		 * single entry.  So a direct descriptor along with
1581 		 * the DMA MR suffices.
1582 		 */
1583 		struct srp_direct_buf *buf = (void *) cmd->add_data;
1584 
1585 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1586 		buf->key = cpu_to_be32(target->global_mr->rkey);
1587 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1588 
1589 		req->nmdesc = 0;
1590 		goto map_complete;
1591 	}
1592 
1593 	/*
1594 	 * We have more than one scatter/gather entry, so build our indirect
1595 	 * descriptor table, trying to merge as many entries as we can.
1596 	 */
1597 	indirect_hdr = (void *) cmd->add_data;
1598 
1599 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1600 				   target->indirect_size, DMA_TO_DEVICE);
1601 
1602 	memset(&state, 0, sizeof(state));
1603 	if (dev->use_fast_reg)
1604 		srp_map_sg_fr(&state, ch, req, scat, count);
1605 	else if (dev->use_fmr)
1606 		srp_map_sg_fmr(&state, ch, req, scat, count);
1607 	else
1608 		srp_map_sg_dma(&state, ch, req, scat, count);
1609 
1610 	/* We've mapped the request, now pull as much of the indirect
1611 	 * descriptor table as we can into the command buffer. If this
1612 	 * target is not using an external indirect table, we are
1613 	 * guaranteed to fit into the command, as the SCSI layer won't
1614 	 * give us more S/G entries than we allow.
1615 	 */
1616 	if (state.ndesc == 1) {
1617 		/*
1618 		 * Memory registration collapsed the sg-list into one entry,
1619 		 * so use a direct descriptor.
1620 		 */
1621 		struct srp_direct_buf *buf = (void *) cmd->add_data;
1622 
1623 		*buf = req->indirect_desc[0];
1624 		goto map_complete;
1625 	}
1626 
1627 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1628 						!target->allow_ext_sg)) {
1629 		shost_printk(KERN_ERR, target->scsi_host,
1630 			     "Could not fit S/G list into SRP_CMD\n");
1631 		return -EIO;
1632 	}
1633 
1634 	count = min(state.ndesc, target->cmd_sg_cnt);
1635 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1636 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1637 
1638 	fmt = SRP_DATA_DESC_INDIRECT;
1639 	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1640 	len += count * sizeof (struct srp_direct_buf);
1641 
1642 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1643 	       count * sizeof (struct srp_direct_buf));
1644 
1645 	if (!target->global_mr) {
1646 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1647 				  idb_len, &idb_rkey);
1648 		if (ret < 0)
1649 			return ret;
1650 		req->nmdesc++;
1651 	} else {
1652 		idb_rkey = cpu_to_be32(target->global_mr->rkey);
1653 	}
1654 
1655 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1656 	indirect_hdr->table_desc.key = idb_rkey;
1657 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1658 	indirect_hdr->len = cpu_to_be32(state.total_len);
1659 
1660 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1661 		cmd->data_out_desc_cnt = count;
1662 	else
1663 		cmd->data_in_desc_cnt = count;
1664 
1665 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1666 				      DMA_TO_DEVICE);
1667 
1668 map_complete:
1669 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1670 		cmd->buf_fmt = fmt << 4;
1671 	else
1672 		cmd->buf_fmt = fmt;
1673 
1674 	return len;
1675 }
1676 
1677 /*
1678  * Return an IU and possible credit to the free pool
1679  */
1680 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1681 			  enum srp_iu_type iu_type)
1682 {
1683 	unsigned long flags;
1684 
1685 	spin_lock_irqsave(&ch->lock, flags);
1686 	list_add(&iu->list, &ch->free_tx);
1687 	if (iu_type != SRP_IU_RSP)
1688 		++ch->req_lim;
1689 	spin_unlock_irqrestore(&ch->lock, flags);
1690 }
1691 
1692 /*
1693  * Must be called with ch->lock held to protect req_lim and free_tx.
1694  * If IU is not sent, it must be returned using srp_put_tx_iu().
1695  *
1696  * Note:
1697  * An upper limit for the number of allocated information units for each
1698  * request type is:
1699  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1700  *   more than Scsi_Host.can_queue requests.
1701  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1702  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1703  *   one unanswered SRP request to an initiator.
1704  */
1705 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1706 				      enum srp_iu_type iu_type)
1707 {
1708 	struct srp_target_port *target = ch->target;
1709 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1710 	struct srp_iu *iu;
1711 
1712 	ib_process_cq_direct(ch->send_cq, -1);
1713 
1714 	if (list_empty(&ch->free_tx))
1715 		return NULL;
1716 
1717 	/* Initiator responses to target requests do not consume credits */
1718 	if (iu_type != SRP_IU_RSP) {
1719 		if (ch->req_lim <= rsv) {
1720 			++target->zero_req_lim;
1721 			return NULL;
1722 		}
1723 
1724 		--ch->req_lim;
1725 	}
1726 
1727 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1728 	list_del(&iu->list);
1729 	return iu;
1730 }
1731 
1732 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1733 {
1734 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1735 	struct srp_rdma_ch *ch = cq->cq_context;
1736 
1737 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1738 		srp_handle_qp_err(cq, wc, "SEND");
1739 		return;
1740 	}
1741 
1742 	list_add(&iu->list, &ch->free_tx);
1743 }
1744 
1745 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1746 {
1747 	struct srp_target_port *target = ch->target;
1748 	struct ib_sge list;
1749 	struct ib_send_wr wr, *bad_wr;
1750 
1751 	list.addr   = iu->dma;
1752 	list.length = len;
1753 	list.lkey   = target->lkey;
1754 
1755 	iu->cqe.done = srp_send_done;
1756 
1757 	wr.next       = NULL;
1758 	wr.wr_cqe     = &iu->cqe;
1759 	wr.sg_list    = &list;
1760 	wr.num_sge    = 1;
1761 	wr.opcode     = IB_WR_SEND;
1762 	wr.send_flags = IB_SEND_SIGNALED;
1763 
1764 	return ib_post_send(ch->qp, &wr, &bad_wr);
1765 }
1766 
1767 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1768 {
1769 	struct srp_target_port *target = ch->target;
1770 	struct ib_recv_wr wr, *bad_wr;
1771 	struct ib_sge list;
1772 
1773 	list.addr   = iu->dma;
1774 	list.length = iu->size;
1775 	list.lkey   = target->lkey;
1776 
1777 	iu->cqe.done = srp_recv_done;
1778 
1779 	wr.next     = NULL;
1780 	wr.wr_cqe   = &iu->cqe;
1781 	wr.sg_list  = &list;
1782 	wr.num_sge  = 1;
1783 
1784 	return ib_post_recv(ch->qp, &wr, &bad_wr);
1785 }
1786 
1787 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1788 {
1789 	struct srp_target_port *target = ch->target;
1790 	struct srp_request *req;
1791 	struct scsi_cmnd *scmnd;
1792 	unsigned long flags;
1793 
1794 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1795 		spin_lock_irqsave(&ch->lock, flags);
1796 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1797 		spin_unlock_irqrestore(&ch->lock, flags);
1798 
1799 		ch->tsk_mgmt_status = -1;
1800 		if (be32_to_cpu(rsp->resp_data_len) >= 4)
1801 			ch->tsk_mgmt_status = rsp->data[3];
1802 		complete(&ch->tsk_mgmt_done);
1803 	} else {
1804 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1805 		if (scmnd) {
1806 			req = (void *)scmnd->host_scribble;
1807 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1808 		}
1809 		if (!scmnd) {
1810 			shost_printk(KERN_ERR, target->scsi_host,
1811 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1812 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1813 
1814 			spin_lock_irqsave(&ch->lock, flags);
1815 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1816 			spin_unlock_irqrestore(&ch->lock, flags);
1817 
1818 			return;
1819 		}
1820 		scmnd->result = rsp->status;
1821 
1822 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1823 			memcpy(scmnd->sense_buffer, rsp->data +
1824 			       be32_to_cpu(rsp->resp_data_len),
1825 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1826 				     SCSI_SENSE_BUFFERSIZE));
1827 		}
1828 
1829 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1830 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1831 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1832 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1833 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1834 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1835 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1836 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1837 
1838 		srp_free_req(ch, req, scmnd,
1839 			     be32_to_cpu(rsp->req_lim_delta));
1840 
1841 		scmnd->host_scribble = NULL;
1842 		scmnd->scsi_done(scmnd);
1843 	}
1844 }
1845 
1846 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1847 			       void *rsp, int len)
1848 {
1849 	struct srp_target_port *target = ch->target;
1850 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1851 	unsigned long flags;
1852 	struct srp_iu *iu;
1853 	int err;
1854 
1855 	spin_lock_irqsave(&ch->lock, flags);
1856 	ch->req_lim += req_delta;
1857 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1858 	spin_unlock_irqrestore(&ch->lock, flags);
1859 
1860 	if (!iu) {
1861 		shost_printk(KERN_ERR, target->scsi_host, PFX
1862 			     "no IU available to send response\n");
1863 		return 1;
1864 	}
1865 
1866 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1867 	memcpy(iu->buf, rsp, len);
1868 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1869 
1870 	err = srp_post_send(ch, iu, len);
1871 	if (err) {
1872 		shost_printk(KERN_ERR, target->scsi_host, PFX
1873 			     "unable to post response: %d\n", err);
1874 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1875 	}
1876 
1877 	return err;
1878 }
1879 
1880 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1881 				 struct srp_cred_req *req)
1882 {
1883 	struct srp_cred_rsp rsp = {
1884 		.opcode = SRP_CRED_RSP,
1885 		.tag = req->tag,
1886 	};
1887 	s32 delta = be32_to_cpu(req->req_lim_delta);
1888 
1889 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1890 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1891 			     "problems processing SRP_CRED_REQ\n");
1892 }
1893 
1894 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1895 				struct srp_aer_req *req)
1896 {
1897 	struct srp_target_port *target = ch->target;
1898 	struct srp_aer_rsp rsp = {
1899 		.opcode = SRP_AER_RSP,
1900 		.tag = req->tag,
1901 	};
1902 	s32 delta = be32_to_cpu(req->req_lim_delta);
1903 
1904 	shost_printk(KERN_ERR, target->scsi_host, PFX
1905 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1906 
1907 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1908 		shost_printk(KERN_ERR, target->scsi_host, PFX
1909 			     "problems processing SRP_AER_REQ\n");
1910 }
1911 
1912 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1913 {
1914 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1915 	struct srp_rdma_ch *ch = cq->cq_context;
1916 	struct srp_target_port *target = ch->target;
1917 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1918 	int res;
1919 	u8 opcode;
1920 
1921 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1922 		srp_handle_qp_err(cq, wc, "RECV");
1923 		return;
1924 	}
1925 
1926 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1927 				   DMA_FROM_DEVICE);
1928 
1929 	opcode = *(u8 *) iu->buf;
1930 
1931 	if (0) {
1932 		shost_printk(KERN_ERR, target->scsi_host,
1933 			     PFX "recv completion, opcode 0x%02x\n", opcode);
1934 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1935 			       iu->buf, wc->byte_len, true);
1936 	}
1937 
1938 	switch (opcode) {
1939 	case SRP_RSP:
1940 		srp_process_rsp(ch, iu->buf);
1941 		break;
1942 
1943 	case SRP_CRED_REQ:
1944 		srp_process_cred_req(ch, iu->buf);
1945 		break;
1946 
1947 	case SRP_AER_REQ:
1948 		srp_process_aer_req(ch, iu->buf);
1949 		break;
1950 
1951 	case SRP_T_LOGOUT:
1952 		/* XXX Handle target logout */
1953 		shost_printk(KERN_WARNING, target->scsi_host,
1954 			     PFX "Got target logout request\n");
1955 		break;
1956 
1957 	default:
1958 		shost_printk(KERN_WARNING, target->scsi_host,
1959 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1960 		break;
1961 	}
1962 
1963 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1964 				      DMA_FROM_DEVICE);
1965 
1966 	res = srp_post_recv(ch, iu);
1967 	if (res != 0)
1968 		shost_printk(KERN_ERR, target->scsi_host,
1969 			     PFX "Recv failed with error code %d\n", res);
1970 }
1971 
1972 /**
1973  * srp_tl_err_work() - handle a transport layer error
1974  * @work: Work structure embedded in an SRP target port.
1975  *
1976  * Note: This function may get invoked before the rport has been created,
1977  * hence the target->rport test.
1978  */
1979 static void srp_tl_err_work(struct work_struct *work)
1980 {
1981 	struct srp_target_port *target;
1982 
1983 	target = container_of(work, struct srp_target_port, tl_err_work);
1984 	if (target->rport)
1985 		srp_start_tl_fail_timers(target->rport);
1986 }
1987 
1988 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
1989 		const char *opname)
1990 {
1991 	struct srp_rdma_ch *ch = cq->cq_context;
1992 	struct srp_target_port *target = ch->target;
1993 
1994 	if (ch->connected && !target->qp_in_error) {
1995 		shost_printk(KERN_ERR, target->scsi_host,
1996 			     PFX "failed %s status %s (%d) for CQE %p\n",
1997 			     opname, ib_wc_status_msg(wc->status), wc->status,
1998 			     wc->wr_cqe);
1999 		queue_work(system_long_wq, &target->tl_err_work);
2000 	}
2001 	target->qp_in_error = true;
2002 }
2003 
2004 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2005 {
2006 	struct srp_target_port *target = host_to_target(shost);
2007 	struct srp_rport *rport = target->rport;
2008 	struct srp_rdma_ch *ch;
2009 	struct srp_request *req;
2010 	struct srp_iu *iu;
2011 	struct srp_cmd *cmd;
2012 	struct ib_device *dev;
2013 	unsigned long flags;
2014 	u32 tag;
2015 	u16 idx;
2016 	int len, ret;
2017 	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2018 
2019 	/*
2020 	 * The SCSI EH thread is the only context from which srp_queuecommand()
2021 	 * can get invoked for blocked devices (SDEV_BLOCK /
2022 	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2023 	 * locking the rport mutex if invoked from inside the SCSI EH.
2024 	 */
2025 	if (in_scsi_eh)
2026 		mutex_lock(&rport->mutex);
2027 
2028 	scmnd->result = srp_chkready(target->rport);
2029 	if (unlikely(scmnd->result))
2030 		goto err;
2031 
2032 	WARN_ON_ONCE(scmnd->request->tag < 0);
2033 	tag = blk_mq_unique_tag(scmnd->request);
2034 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2035 	idx = blk_mq_unique_tag_to_tag(tag);
2036 	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2037 		  dev_name(&shost->shost_gendev), tag, idx,
2038 		  target->req_ring_size);
2039 
2040 	spin_lock_irqsave(&ch->lock, flags);
2041 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2042 	spin_unlock_irqrestore(&ch->lock, flags);
2043 
2044 	if (!iu)
2045 		goto err;
2046 
2047 	req = &ch->req_ring[idx];
2048 	dev = target->srp_host->srp_dev->dev;
2049 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2050 				   DMA_TO_DEVICE);
2051 
2052 	scmnd->host_scribble = (void *) req;
2053 
2054 	cmd = iu->buf;
2055 	memset(cmd, 0, sizeof *cmd);
2056 
2057 	cmd->opcode = SRP_CMD;
2058 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
2059 	cmd->tag    = tag;
2060 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2061 
2062 	req->scmnd    = scmnd;
2063 	req->cmd      = iu;
2064 
2065 	len = srp_map_data(scmnd, ch, req);
2066 	if (len < 0) {
2067 		shost_printk(KERN_ERR, target->scsi_host,
2068 			     PFX "Failed to map data (%d)\n", len);
2069 		/*
2070 		 * If we ran out of memory descriptors (-ENOMEM) because an
2071 		 * application is queuing many requests with more than
2072 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2073 		 * to reduce queue depth temporarily.
2074 		 */
2075 		scmnd->result = len == -ENOMEM ?
2076 			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2077 		goto err_iu;
2078 	}
2079 
2080 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2081 				      DMA_TO_DEVICE);
2082 
2083 	if (srp_post_send(ch, iu, len)) {
2084 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2085 		goto err_unmap;
2086 	}
2087 
2088 	ret = 0;
2089 
2090 unlock_rport:
2091 	if (in_scsi_eh)
2092 		mutex_unlock(&rport->mutex);
2093 
2094 	return ret;
2095 
2096 err_unmap:
2097 	srp_unmap_data(scmnd, ch, req);
2098 
2099 err_iu:
2100 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2101 
2102 	/*
2103 	 * Avoid that the loops that iterate over the request ring can
2104 	 * encounter a dangling SCSI command pointer.
2105 	 */
2106 	req->scmnd = NULL;
2107 
2108 err:
2109 	if (scmnd->result) {
2110 		scmnd->scsi_done(scmnd);
2111 		ret = 0;
2112 	} else {
2113 		ret = SCSI_MLQUEUE_HOST_BUSY;
2114 	}
2115 
2116 	goto unlock_rport;
2117 }
2118 
2119 /*
2120  * Note: the resources allocated in this function are freed in
2121  * srp_free_ch_ib().
2122  */
2123 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2124 {
2125 	struct srp_target_port *target = ch->target;
2126 	int i;
2127 
2128 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2129 			      GFP_KERNEL);
2130 	if (!ch->rx_ring)
2131 		goto err_no_ring;
2132 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2133 			      GFP_KERNEL);
2134 	if (!ch->tx_ring)
2135 		goto err_no_ring;
2136 
2137 	for (i = 0; i < target->queue_size; ++i) {
2138 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2139 					      ch->max_ti_iu_len,
2140 					      GFP_KERNEL, DMA_FROM_DEVICE);
2141 		if (!ch->rx_ring[i])
2142 			goto err;
2143 	}
2144 
2145 	for (i = 0; i < target->queue_size; ++i) {
2146 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2147 					      target->max_iu_len,
2148 					      GFP_KERNEL, DMA_TO_DEVICE);
2149 		if (!ch->tx_ring[i])
2150 			goto err;
2151 
2152 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2153 	}
2154 
2155 	return 0;
2156 
2157 err:
2158 	for (i = 0; i < target->queue_size; ++i) {
2159 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2160 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2161 	}
2162 
2163 
2164 err_no_ring:
2165 	kfree(ch->tx_ring);
2166 	ch->tx_ring = NULL;
2167 	kfree(ch->rx_ring);
2168 	ch->rx_ring = NULL;
2169 
2170 	return -ENOMEM;
2171 }
2172 
2173 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2174 {
2175 	uint64_t T_tr_ns, max_compl_time_ms;
2176 	uint32_t rq_tmo_jiffies;
2177 
2178 	/*
2179 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2180 	 * table 91), both the QP timeout and the retry count have to be set
2181 	 * for RC QP's during the RTR to RTS transition.
2182 	 */
2183 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2184 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2185 
2186 	/*
2187 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2188 	 * it can take before an error completion is generated. See also
2189 	 * C9-140..142 in the IBTA spec for more information about how to
2190 	 * convert the QP Local ACK Timeout value to nanoseconds.
2191 	 */
2192 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2193 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2194 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2195 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2196 
2197 	return rq_tmo_jiffies;
2198 }
2199 
2200 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2201 			       const struct srp_login_rsp *lrsp,
2202 			       struct srp_rdma_ch *ch)
2203 {
2204 	struct srp_target_port *target = ch->target;
2205 	struct ib_qp_attr *qp_attr = NULL;
2206 	int attr_mask = 0;
2207 	int ret;
2208 	int i;
2209 
2210 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2211 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2212 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2213 
2214 		/*
2215 		 * Reserve credits for task management so we don't
2216 		 * bounce requests back to the SCSI mid-layer.
2217 		 */
2218 		target->scsi_host->can_queue
2219 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2220 			      target->scsi_host->can_queue);
2221 		target->scsi_host->cmd_per_lun
2222 			= min_t(int, target->scsi_host->can_queue,
2223 				target->scsi_host->cmd_per_lun);
2224 	} else {
2225 		shost_printk(KERN_WARNING, target->scsi_host,
2226 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2227 		ret = -ECONNRESET;
2228 		goto error;
2229 	}
2230 
2231 	if (!ch->rx_ring) {
2232 		ret = srp_alloc_iu_bufs(ch);
2233 		if (ret)
2234 			goto error;
2235 	}
2236 
2237 	ret = -ENOMEM;
2238 	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2239 	if (!qp_attr)
2240 		goto error;
2241 
2242 	qp_attr->qp_state = IB_QPS_RTR;
2243 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2244 	if (ret)
2245 		goto error_free;
2246 
2247 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2248 	if (ret)
2249 		goto error_free;
2250 
2251 	for (i = 0; i < target->queue_size; i++) {
2252 		struct srp_iu *iu = ch->rx_ring[i];
2253 
2254 		ret = srp_post_recv(ch, iu);
2255 		if (ret)
2256 			goto error_free;
2257 	}
2258 
2259 	qp_attr->qp_state = IB_QPS_RTS;
2260 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2261 	if (ret)
2262 		goto error_free;
2263 
2264 	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2265 
2266 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2267 	if (ret)
2268 		goto error_free;
2269 
2270 	ret = ib_send_cm_rtu(cm_id, NULL, 0);
2271 
2272 error_free:
2273 	kfree(qp_attr);
2274 
2275 error:
2276 	ch->status = ret;
2277 }
2278 
2279 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2280 			       struct ib_cm_event *event,
2281 			       struct srp_rdma_ch *ch)
2282 {
2283 	struct srp_target_port *target = ch->target;
2284 	struct Scsi_Host *shost = target->scsi_host;
2285 	struct ib_class_port_info *cpi;
2286 	int opcode;
2287 
2288 	switch (event->param.rej_rcvd.reason) {
2289 	case IB_CM_REJ_PORT_CM_REDIRECT:
2290 		cpi = event->param.rej_rcvd.ari;
2291 		ch->path.dlid = cpi->redirect_lid;
2292 		ch->path.pkey = cpi->redirect_pkey;
2293 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2294 		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2295 
2296 		ch->status = ch->path.dlid ?
2297 			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2298 		break;
2299 
2300 	case IB_CM_REJ_PORT_REDIRECT:
2301 		if (srp_target_is_topspin(target)) {
2302 			/*
2303 			 * Topspin/Cisco SRP gateways incorrectly send
2304 			 * reject reason code 25 when they mean 24
2305 			 * (port redirect).
2306 			 */
2307 			memcpy(ch->path.dgid.raw,
2308 			       event->param.rej_rcvd.ari, 16);
2309 
2310 			shost_printk(KERN_DEBUG, shost,
2311 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2312 				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2313 				     be64_to_cpu(ch->path.dgid.global.interface_id));
2314 
2315 			ch->status = SRP_PORT_REDIRECT;
2316 		} else {
2317 			shost_printk(KERN_WARNING, shost,
2318 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2319 			ch->status = -ECONNRESET;
2320 		}
2321 		break;
2322 
2323 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2324 		shost_printk(KERN_WARNING, shost,
2325 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2326 		ch->status = -ECONNRESET;
2327 		break;
2328 
2329 	case IB_CM_REJ_CONSUMER_DEFINED:
2330 		opcode = *(u8 *) event->private_data;
2331 		if (opcode == SRP_LOGIN_REJ) {
2332 			struct srp_login_rej *rej = event->private_data;
2333 			u32 reason = be32_to_cpu(rej->reason);
2334 
2335 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2336 				shost_printk(KERN_WARNING, shost,
2337 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2338 			else
2339 				shost_printk(KERN_WARNING, shost, PFX
2340 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2341 					     target->sgid.raw,
2342 					     target->orig_dgid.raw, reason);
2343 		} else
2344 			shost_printk(KERN_WARNING, shost,
2345 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2346 				     " opcode 0x%02x\n", opcode);
2347 		ch->status = -ECONNRESET;
2348 		break;
2349 
2350 	case IB_CM_REJ_STALE_CONN:
2351 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2352 		ch->status = SRP_STALE_CONN;
2353 		break;
2354 
2355 	default:
2356 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2357 			     event->param.rej_rcvd.reason);
2358 		ch->status = -ECONNRESET;
2359 	}
2360 }
2361 
2362 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2363 {
2364 	struct srp_rdma_ch *ch = cm_id->context;
2365 	struct srp_target_port *target = ch->target;
2366 	int comp = 0;
2367 
2368 	switch (event->event) {
2369 	case IB_CM_REQ_ERROR:
2370 		shost_printk(KERN_DEBUG, target->scsi_host,
2371 			     PFX "Sending CM REQ failed\n");
2372 		comp = 1;
2373 		ch->status = -ECONNRESET;
2374 		break;
2375 
2376 	case IB_CM_REP_RECEIVED:
2377 		comp = 1;
2378 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2379 		break;
2380 
2381 	case IB_CM_REJ_RECEIVED:
2382 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2383 		comp = 1;
2384 
2385 		srp_cm_rej_handler(cm_id, event, ch);
2386 		break;
2387 
2388 	case IB_CM_DREQ_RECEIVED:
2389 		shost_printk(KERN_WARNING, target->scsi_host,
2390 			     PFX "DREQ received - connection closed\n");
2391 		ch->connected = false;
2392 		if (ib_send_cm_drep(cm_id, NULL, 0))
2393 			shost_printk(KERN_ERR, target->scsi_host,
2394 				     PFX "Sending CM DREP failed\n");
2395 		queue_work(system_long_wq, &target->tl_err_work);
2396 		break;
2397 
2398 	case IB_CM_TIMEWAIT_EXIT:
2399 		shost_printk(KERN_ERR, target->scsi_host,
2400 			     PFX "connection closed\n");
2401 		comp = 1;
2402 
2403 		ch->status = 0;
2404 		break;
2405 
2406 	case IB_CM_MRA_RECEIVED:
2407 	case IB_CM_DREQ_ERROR:
2408 	case IB_CM_DREP_RECEIVED:
2409 		break;
2410 
2411 	default:
2412 		shost_printk(KERN_WARNING, target->scsi_host,
2413 			     PFX "Unhandled CM event %d\n", event->event);
2414 		break;
2415 	}
2416 
2417 	if (comp)
2418 		complete(&ch->done);
2419 
2420 	return 0;
2421 }
2422 
2423 /**
2424  * srp_change_queue_depth - setting device queue depth
2425  * @sdev: scsi device struct
2426  * @qdepth: requested queue depth
2427  *
2428  * Returns queue depth.
2429  */
2430 static int
2431 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2432 {
2433 	if (!sdev->tagged_supported)
2434 		qdepth = 1;
2435 	return scsi_change_queue_depth(sdev, qdepth);
2436 }
2437 
2438 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2439 			     u8 func)
2440 {
2441 	struct srp_target_port *target = ch->target;
2442 	struct srp_rport *rport = target->rport;
2443 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2444 	struct srp_iu *iu;
2445 	struct srp_tsk_mgmt *tsk_mgmt;
2446 
2447 	if (!ch->connected || target->qp_in_error)
2448 		return -1;
2449 
2450 	init_completion(&ch->tsk_mgmt_done);
2451 
2452 	/*
2453 	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
2454 	 * invoked while a task management function is being sent.
2455 	 */
2456 	mutex_lock(&rport->mutex);
2457 	spin_lock_irq(&ch->lock);
2458 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2459 	spin_unlock_irq(&ch->lock);
2460 
2461 	if (!iu) {
2462 		mutex_unlock(&rport->mutex);
2463 
2464 		return -1;
2465 	}
2466 
2467 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2468 				   DMA_TO_DEVICE);
2469 	tsk_mgmt = iu->buf;
2470 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2471 
2472 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2473 	int_to_scsilun(lun, &tsk_mgmt->lun);
2474 	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
2475 	tsk_mgmt->tsk_mgmt_func = func;
2476 	tsk_mgmt->task_tag	= req_tag;
2477 
2478 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2479 				      DMA_TO_DEVICE);
2480 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2481 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2482 		mutex_unlock(&rport->mutex);
2483 
2484 		return -1;
2485 	}
2486 	mutex_unlock(&rport->mutex);
2487 
2488 	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2489 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2490 		return -1;
2491 
2492 	return 0;
2493 }
2494 
2495 static int srp_abort(struct scsi_cmnd *scmnd)
2496 {
2497 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2498 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2499 	u32 tag;
2500 	u16 ch_idx;
2501 	struct srp_rdma_ch *ch;
2502 	int ret;
2503 
2504 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2505 
2506 	if (!req)
2507 		return SUCCESS;
2508 	tag = blk_mq_unique_tag(scmnd->request);
2509 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2510 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2511 		return SUCCESS;
2512 	ch = &target->ch[ch_idx];
2513 	if (!srp_claim_req(ch, req, NULL, scmnd))
2514 		return SUCCESS;
2515 	shost_printk(KERN_ERR, target->scsi_host,
2516 		     "Sending SRP abort for tag %#x\n", tag);
2517 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2518 			      SRP_TSK_ABORT_TASK) == 0)
2519 		ret = SUCCESS;
2520 	else if (target->rport->state == SRP_RPORT_LOST)
2521 		ret = FAST_IO_FAIL;
2522 	else
2523 		ret = FAILED;
2524 	srp_free_req(ch, req, scmnd, 0);
2525 	scmnd->result = DID_ABORT << 16;
2526 	scmnd->scsi_done(scmnd);
2527 
2528 	return ret;
2529 }
2530 
2531 static int srp_reset_device(struct scsi_cmnd *scmnd)
2532 {
2533 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2534 	struct srp_rdma_ch *ch;
2535 	int i;
2536 
2537 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2538 
2539 	ch = &target->ch[0];
2540 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2541 			      SRP_TSK_LUN_RESET))
2542 		return FAILED;
2543 	if (ch->tsk_mgmt_status)
2544 		return FAILED;
2545 
2546 	for (i = 0; i < target->ch_count; i++) {
2547 		ch = &target->ch[i];
2548 		for (i = 0; i < target->req_ring_size; ++i) {
2549 			struct srp_request *req = &ch->req_ring[i];
2550 
2551 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2552 		}
2553 	}
2554 
2555 	return SUCCESS;
2556 }
2557 
2558 static int srp_reset_host(struct scsi_cmnd *scmnd)
2559 {
2560 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2561 
2562 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2563 
2564 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2565 }
2566 
2567 static int srp_slave_configure(struct scsi_device *sdev)
2568 {
2569 	struct Scsi_Host *shost = sdev->host;
2570 	struct srp_target_port *target = host_to_target(shost);
2571 	struct request_queue *q = sdev->request_queue;
2572 	unsigned long timeout;
2573 
2574 	if (sdev->type == TYPE_DISK) {
2575 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2576 		blk_queue_rq_timeout(q, timeout);
2577 	}
2578 
2579 	return 0;
2580 }
2581 
2582 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2583 			   char *buf)
2584 {
2585 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2586 
2587 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2588 }
2589 
2590 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2591 			     char *buf)
2592 {
2593 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2594 
2595 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2596 }
2597 
2598 static ssize_t show_service_id(struct device *dev,
2599 			       struct device_attribute *attr, char *buf)
2600 {
2601 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2602 
2603 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2604 }
2605 
2606 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2607 			 char *buf)
2608 {
2609 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2610 
2611 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2612 }
2613 
2614 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2615 			 char *buf)
2616 {
2617 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2618 
2619 	return sprintf(buf, "%pI6\n", target->sgid.raw);
2620 }
2621 
2622 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2623 			 char *buf)
2624 {
2625 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2626 	struct srp_rdma_ch *ch = &target->ch[0];
2627 
2628 	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2629 }
2630 
2631 static ssize_t show_orig_dgid(struct device *dev,
2632 			      struct device_attribute *attr, char *buf)
2633 {
2634 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2635 
2636 	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2637 }
2638 
2639 static ssize_t show_req_lim(struct device *dev,
2640 			    struct device_attribute *attr, char *buf)
2641 {
2642 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2643 	struct srp_rdma_ch *ch;
2644 	int i, req_lim = INT_MAX;
2645 
2646 	for (i = 0; i < target->ch_count; i++) {
2647 		ch = &target->ch[i];
2648 		req_lim = min(req_lim, ch->req_lim);
2649 	}
2650 	return sprintf(buf, "%d\n", req_lim);
2651 }
2652 
2653 static ssize_t show_zero_req_lim(struct device *dev,
2654 				 struct device_attribute *attr, char *buf)
2655 {
2656 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2657 
2658 	return sprintf(buf, "%d\n", target->zero_req_lim);
2659 }
2660 
2661 static ssize_t show_local_ib_port(struct device *dev,
2662 				  struct device_attribute *attr, char *buf)
2663 {
2664 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2665 
2666 	return sprintf(buf, "%d\n", target->srp_host->port);
2667 }
2668 
2669 static ssize_t show_local_ib_device(struct device *dev,
2670 				    struct device_attribute *attr, char *buf)
2671 {
2672 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2673 
2674 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2675 }
2676 
2677 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2678 			     char *buf)
2679 {
2680 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2681 
2682 	return sprintf(buf, "%d\n", target->ch_count);
2683 }
2684 
2685 static ssize_t show_comp_vector(struct device *dev,
2686 				struct device_attribute *attr, char *buf)
2687 {
2688 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689 
2690 	return sprintf(buf, "%d\n", target->comp_vector);
2691 }
2692 
2693 static ssize_t show_tl_retry_count(struct device *dev,
2694 				   struct device_attribute *attr, char *buf)
2695 {
2696 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2697 
2698 	return sprintf(buf, "%d\n", target->tl_retry_count);
2699 }
2700 
2701 static ssize_t show_cmd_sg_entries(struct device *dev,
2702 				   struct device_attribute *attr, char *buf)
2703 {
2704 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2705 
2706 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2707 }
2708 
2709 static ssize_t show_allow_ext_sg(struct device *dev,
2710 				 struct device_attribute *attr, char *buf)
2711 {
2712 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2713 
2714 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2715 }
2716 
2717 static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
2718 static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
2719 static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
2720 static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
2721 static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
2722 static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
2723 static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
2724 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2725 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
2726 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2727 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2728 static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
2729 static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
2730 static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
2731 static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2732 static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
2733 
2734 static struct device_attribute *srp_host_attrs[] = {
2735 	&dev_attr_id_ext,
2736 	&dev_attr_ioc_guid,
2737 	&dev_attr_service_id,
2738 	&dev_attr_pkey,
2739 	&dev_attr_sgid,
2740 	&dev_attr_dgid,
2741 	&dev_attr_orig_dgid,
2742 	&dev_attr_req_lim,
2743 	&dev_attr_zero_req_lim,
2744 	&dev_attr_local_ib_port,
2745 	&dev_attr_local_ib_device,
2746 	&dev_attr_ch_count,
2747 	&dev_attr_comp_vector,
2748 	&dev_attr_tl_retry_count,
2749 	&dev_attr_cmd_sg_entries,
2750 	&dev_attr_allow_ext_sg,
2751 	NULL
2752 };
2753 
2754 static struct scsi_host_template srp_template = {
2755 	.module				= THIS_MODULE,
2756 	.name				= "InfiniBand SRP initiator",
2757 	.proc_name			= DRV_NAME,
2758 	.slave_configure		= srp_slave_configure,
2759 	.info				= srp_target_info,
2760 	.queuecommand			= srp_queuecommand,
2761 	.change_queue_depth             = srp_change_queue_depth,
2762 	.eh_abort_handler		= srp_abort,
2763 	.eh_device_reset_handler	= srp_reset_device,
2764 	.eh_host_reset_handler		= srp_reset_host,
2765 	.skip_settle_delay		= true,
2766 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
2767 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
2768 	.this_id			= -1,
2769 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
2770 	.use_clustering			= ENABLE_CLUSTERING,
2771 	.shost_attrs			= srp_host_attrs,
2772 	.track_queue_depth		= 1,
2773 };
2774 
2775 static int srp_sdev_count(struct Scsi_Host *host)
2776 {
2777 	struct scsi_device *sdev;
2778 	int c = 0;
2779 
2780 	shost_for_each_device(sdev, host)
2781 		c++;
2782 
2783 	return c;
2784 }
2785 
2786 /*
2787  * Return values:
2788  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2789  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2790  *    removal has been scheduled.
2791  * 0 and target->state != SRP_TARGET_REMOVED upon success.
2792  */
2793 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2794 {
2795 	struct srp_rport_identifiers ids;
2796 	struct srp_rport *rport;
2797 
2798 	target->state = SRP_TARGET_SCANNING;
2799 	sprintf(target->target_name, "SRP.T10:%016llX",
2800 		be64_to_cpu(target->id_ext));
2801 
2802 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2803 		return -ENODEV;
2804 
2805 	memcpy(ids.port_id, &target->id_ext, 8);
2806 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2807 	ids.roles = SRP_RPORT_ROLE_TARGET;
2808 	rport = srp_rport_add(target->scsi_host, &ids);
2809 	if (IS_ERR(rport)) {
2810 		scsi_remove_host(target->scsi_host);
2811 		return PTR_ERR(rport);
2812 	}
2813 
2814 	rport->lld_data = target;
2815 	target->rport = rport;
2816 
2817 	spin_lock(&host->target_lock);
2818 	list_add_tail(&target->list, &host->target_list);
2819 	spin_unlock(&host->target_lock);
2820 
2821 	scsi_scan_target(&target->scsi_host->shost_gendev,
2822 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
2823 
2824 	if (srp_connected_ch(target) < target->ch_count ||
2825 	    target->qp_in_error) {
2826 		shost_printk(KERN_INFO, target->scsi_host,
2827 			     PFX "SCSI scan failed - removing SCSI host\n");
2828 		srp_queue_remove_work(target);
2829 		goto out;
2830 	}
2831 
2832 	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2833 		 dev_name(&target->scsi_host->shost_gendev),
2834 		 srp_sdev_count(target->scsi_host));
2835 
2836 	spin_lock_irq(&target->lock);
2837 	if (target->state == SRP_TARGET_SCANNING)
2838 		target->state = SRP_TARGET_LIVE;
2839 	spin_unlock_irq(&target->lock);
2840 
2841 out:
2842 	return 0;
2843 }
2844 
2845 static void srp_release_dev(struct device *dev)
2846 {
2847 	struct srp_host *host =
2848 		container_of(dev, struct srp_host, dev);
2849 
2850 	complete(&host->released);
2851 }
2852 
2853 static struct class srp_class = {
2854 	.name    = "infiniband_srp",
2855 	.dev_release = srp_release_dev
2856 };
2857 
2858 /**
2859  * srp_conn_unique() - check whether the connection to a target is unique
2860  * @host:   SRP host.
2861  * @target: SRP target port.
2862  */
2863 static bool srp_conn_unique(struct srp_host *host,
2864 			    struct srp_target_port *target)
2865 {
2866 	struct srp_target_port *t;
2867 	bool ret = false;
2868 
2869 	if (target->state == SRP_TARGET_REMOVED)
2870 		goto out;
2871 
2872 	ret = true;
2873 
2874 	spin_lock(&host->target_lock);
2875 	list_for_each_entry(t, &host->target_list, list) {
2876 		if (t != target &&
2877 		    target->id_ext == t->id_ext &&
2878 		    target->ioc_guid == t->ioc_guid &&
2879 		    target->initiator_ext == t->initiator_ext) {
2880 			ret = false;
2881 			break;
2882 		}
2883 	}
2884 	spin_unlock(&host->target_lock);
2885 
2886 out:
2887 	return ret;
2888 }
2889 
2890 /*
2891  * Target ports are added by writing
2892  *
2893  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2894  *     pkey=<P_Key>,service_id=<service ID>
2895  *
2896  * to the add_target sysfs attribute.
2897  */
2898 enum {
2899 	SRP_OPT_ERR		= 0,
2900 	SRP_OPT_ID_EXT		= 1 << 0,
2901 	SRP_OPT_IOC_GUID	= 1 << 1,
2902 	SRP_OPT_DGID		= 1 << 2,
2903 	SRP_OPT_PKEY		= 1 << 3,
2904 	SRP_OPT_SERVICE_ID	= 1 << 4,
2905 	SRP_OPT_MAX_SECT	= 1 << 5,
2906 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
2907 	SRP_OPT_IO_CLASS	= 1 << 7,
2908 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
2909 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
2910 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
2911 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
2912 	SRP_OPT_COMP_VECTOR	= 1 << 12,
2913 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
2914 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
2915 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
2916 				   SRP_OPT_IOC_GUID	|
2917 				   SRP_OPT_DGID		|
2918 				   SRP_OPT_PKEY		|
2919 				   SRP_OPT_SERVICE_ID),
2920 };
2921 
2922 static const match_table_t srp_opt_tokens = {
2923 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
2924 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
2925 	{ SRP_OPT_DGID,			"dgid=%s" 		},
2926 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
2927 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
2928 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
2929 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
2930 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
2931 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
2932 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
2933 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
2934 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
2935 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
2936 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
2937 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
2938 	{ SRP_OPT_ERR,			NULL 			}
2939 };
2940 
2941 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2942 {
2943 	char *options, *sep_opt;
2944 	char *p;
2945 	char dgid[3];
2946 	substring_t args[MAX_OPT_ARGS];
2947 	int opt_mask = 0;
2948 	int token;
2949 	int ret = -EINVAL;
2950 	int i;
2951 
2952 	options = kstrdup(buf, GFP_KERNEL);
2953 	if (!options)
2954 		return -ENOMEM;
2955 
2956 	sep_opt = options;
2957 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2958 		if (!*p)
2959 			continue;
2960 
2961 		token = match_token(p, srp_opt_tokens, args);
2962 		opt_mask |= token;
2963 
2964 		switch (token) {
2965 		case SRP_OPT_ID_EXT:
2966 			p = match_strdup(args);
2967 			if (!p) {
2968 				ret = -ENOMEM;
2969 				goto out;
2970 			}
2971 			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2972 			kfree(p);
2973 			break;
2974 
2975 		case SRP_OPT_IOC_GUID:
2976 			p = match_strdup(args);
2977 			if (!p) {
2978 				ret = -ENOMEM;
2979 				goto out;
2980 			}
2981 			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2982 			kfree(p);
2983 			break;
2984 
2985 		case SRP_OPT_DGID:
2986 			p = match_strdup(args);
2987 			if (!p) {
2988 				ret = -ENOMEM;
2989 				goto out;
2990 			}
2991 			if (strlen(p) != 32) {
2992 				pr_warn("bad dest GID parameter '%s'\n", p);
2993 				kfree(p);
2994 				goto out;
2995 			}
2996 
2997 			for (i = 0; i < 16; ++i) {
2998 				strlcpy(dgid, p + i * 2, sizeof(dgid));
2999 				if (sscanf(dgid, "%hhx",
3000 					   &target->orig_dgid.raw[i]) < 1) {
3001 					ret = -EINVAL;
3002 					kfree(p);
3003 					goto out;
3004 				}
3005 			}
3006 			kfree(p);
3007 			break;
3008 
3009 		case SRP_OPT_PKEY:
3010 			if (match_hex(args, &token)) {
3011 				pr_warn("bad P_Key parameter '%s'\n", p);
3012 				goto out;
3013 			}
3014 			target->pkey = cpu_to_be16(token);
3015 			break;
3016 
3017 		case SRP_OPT_SERVICE_ID:
3018 			p = match_strdup(args);
3019 			if (!p) {
3020 				ret = -ENOMEM;
3021 				goto out;
3022 			}
3023 			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3024 			kfree(p);
3025 			break;
3026 
3027 		case SRP_OPT_MAX_SECT:
3028 			if (match_int(args, &token)) {
3029 				pr_warn("bad max sect parameter '%s'\n", p);
3030 				goto out;
3031 			}
3032 			target->scsi_host->max_sectors = token;
3033 			break;
3034 
3035 		case SRP_OPT_QUEUE_SIZE:
3036 			if (match_int(args, &token) || token < 1) {
3037 				pr_warn("bad queue_size parameter '%s'\n", p);
3038 				goto out;
3039 			}
3040 			target->scsi_host->can_queue = token;
3041 			target->queue_size = token + SRP_RSP_SQ_SIZE +
3042 					     SRP_TSK_MGMT_SQ_SIZE;
3043 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3044 				target->scsi_host->cmd_per_lun = token;
3045 			break;
3046 
3047 		case SRP_OPT_MAX_CMD_PER_LUN:
3048 			if (match_int(args, &token) || token < 1) {
3049 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3050 					p);
3051 				goto out;
3052 			}
3053 			target->scsi_host->cmd_per_lun = token;
3054 			break;
3055 
3056 		case SRP_OPT_IO_CLASS:
3057 			if (match_hex(args, &token)) {
3058 				pr_warn("bad IO class parameter '%s'\n", p);
3059 				goto out;
3060 			}
3061 			if (token != SRP_REV10_IB_IO_CLASS &&
3062 			    token != SRP_REV16A_IB_IO_CLASS) {
3063 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3064 					token, SRP_REV10_IB_IO_CLASS,
3065 					SRP_REV16A_IB_IO_CLASS);
3066 				goto out;
3067 			}
3068 			target->io_class = token;
3069 			break;
3070 
3071 		case SRP_OPT_INITIATOR_EXT:
3072 			p = match_strdup(args);
3073 			if (!p) {
3074 				ret = -ENOMEM;
3075 				goto out;
3076 			}
3077 			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3078 			kfree(p);
3079 			break;
3080 
3081 		case SRP_OPT_CMD_SG_ENTRIES:
3082 			if (match_int(args, &token) || token < 1 || token > 255) {
3083 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3084 					p);
3085 				goto out;
3086 			}
3087 			target->cmd_sg_cnt = token;
3088 			break;
3089 
3090 		case SRP_OPT_ALLOW_EXT_SG:
3091 			if (match_int(args, &token)) {
3092 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3093 				goto out;
3094 			}
3095 			target->allow_ext_sg = !!token;
3096 			break;
3097 
3098 		case SRP_OPT_SG_TABLESIZE:
3099 			if (match_int(args, &token) || token < 1 ||
3100 					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3101 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3102 					p);
3103 				goto out;
3104 			}
3105 			target->sg_tablesize = token;
3106 			break;
3107 
3108 		case SRP_OPT_COMP_VECTOR:
3109 			if (match_int(args, &token) || token < 0) {
3110 				pr_warn("bad comp_vector parameter '%s'\n", p);
3111 				goto out;
3112 			}
3113 			target->comp_vector = token;
3114 			break;
3115 
3116 		case SRP_OPT_TL_RETRY_COUNT:
3117 			if (match_int(args, &token) || token < 2 || token > 7) {
3118 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3119 					p);
3120 				goto out;
3121 			}
3122 			target->tl_retry_count = token;
3123 			break;
3124 
3125 		default:
3126 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3127 				p);
3128 			goto out;
3129 		}
3130 	}
3131 
3132 	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3133 		ret = 0;
3134 	else
3135 		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3136 			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3137 			    !(srp_opt_tokens[i].token & opt_mask))
3138 				pr_warn("target creation request is missing parameter '%s'\n",
3139 					srp_opt_tokens[i].pattern);
3140 
3141 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3142 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3143 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3144 			target->scsi_host->cmd_per_lun,
3145 			target->scsi_host->can_queue);
3146 
3147 out:
3148 	kfree(options);
3149 	return ret;
3150 }
3151 
3152 static ssize_t srp_create_target(struct device *dev,
3153 				 struct device_attribute *attr,
3154 				 const char *buf, size_t count)
3155 {
3156 	struct srp_host *host =
3157 		container_of(dev, struct srp_host, dev);
3158 	struct Scsi_Host *target_host;
3159 	struct srp_target_port *target;
3160 	struct srp_rdma_ch *ch;
3161 	struct srp_device *srp_dev = host->srp_dev;
3162 	struct ib_device *ibdev = srp_dev->dev;
3163 	int ret, node_idx, node, cpu, i;
3164 	bool multich = false;
3165 
3166 	target_host = scsi_host_alloc(&srp_template,
3167 				      sizeof (struct srp_target_port));
3168 	if (!target_host)
3169 		return -ENOMEM;
3170 
3171 	target_host->transportt  = ib_srp_transport_template;
3172 	target_host->max_channel = 0;
3173 	target_host->max_id      = 1;
3174 	target_host->max_lun     = -1LL;
3175 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3176 
3177 	target = host_to_target(target_host);
3178 
3179 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3180 	target->scsi_host	= target_host;
3181 	target->srp_host	= host;
3182 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
3183 	target->global_mr	= host->srp_dev->global_mr;
3184 	target->cmd_sg_cnt	= cmd_sg_entries;
3185 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3186 	target->allow_ext_sg	= allow_ext_sg;
3187 	target->tl_retry_count	= 7;
3188 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3189 
3190 	/*
3191 	 * Avoid that the SCSI host can be removed by srp_remove_target()
3192 	 * before this function returns.
3193 	 */
3194 	scsi_host_get(target->scsi_host);
3195 
3196 	mutex_lock(&host->add_target_mutex);
3197 
3198 	ret = srp_parse_options(buf, target);
3199 	if (ret)
3200 		goto out;
3201 
3202 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3203 
3204 	if (!srp_conn_unique(target->srp_host, target)) {
3205 		shost_printk(KERN_INFO, target->scsi_host,
3206 			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3207 			     be64_to_cpu(target->id_ext),
3208 			     be64_to_cpu(target->ioc_guid),
3209 			     be64_to_cpu(target->initiator_ext));
3210 		ret = -EEXIST;
3211 		goto out;
3212 	}
3213 
3214 	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3215 	    target->cmd_sg_cnt < target->sg_tablesize) {
3216 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3217 		target->sg_tablesize = target->cmd_sg_cnt;
3218 	}
3219 
3220 	target_host->sg_tablesize = target->sg_tablesize;
3221 	target->indirect_size = target->sg_tablesize *
3222 				sizeof (struct srp_direct_buf);
3223 	target->max_iu_len = sizeof (struct srp_cmd) +
3224 			     sizeof (struct srp_indirect_buf) +
3225 			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3226 
3227 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3228 	INIT_WORK(&target->remove_work, srp_remove_work);
3229 	spin_lock_init(&target->lock);
3230 	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
3231 	if (ret)
3232 		goto out;
3233 
3234 	ret = -ENOMEM;
3235 	target->ch_count = max_t(unsigned, num_online_nodes(),
3236 				 min(ch_count ? :
3237 				     min(4 * num_online_nodes(),
3238 					 ibdev->num_comp_vectors),
3239 				     num_online_cpus()));
3240 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3241 			     GFP_KERNEL);
3242 	if (!target->ch)
3243 		goto out;
3244 
3245 	node_idx = 0;
3246 	for_each_online_node(node) {
3247 		const int ch_start = (node_idx * target->ch_count /
3248 				      num_online_nodes());
3249 		const int ch_end = ((node_idx + 1) * target->ch_count /
3250 				    num_online_nodes());
3251 		const int cv_start = (node_idx * ibdev->num_comp_vectors /
3252 				      num_online_nodes() + target->comp_vector)
3253 				     % ibdev->num_comp_vectors;
3254 		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3255 				    num_online_nodes() + target->comp_vector)
3256 				   % ibdev->num_comp_vectors;
3257 		int cpu_idx = 0;
3258 
3259 		for_each_online_cpu(cpu) {
3260 			if (cpu_to_node(cpu) != node)
3261 				continue;
3262 			if (ch_start + cpu_idx >= ch_end)
3263 				continue;
3264 			ch = &target->ch[ch_start + cpu_idx];
3265 			ch->target = target;
3266 			ch->comp_vector = cv_start == cv_end ? cv_start :
3267 				cv_start + cpu_idx % (cv_end - cv_start);
3268 			spin_lock_init(&ch->lock);
3269 			INIT_LIST_HEAD(&ch->free_tx);
3270 			ret = srp_new_cm_id(ch);
3271 			if (ret)
3272 				goto err_disconnect;
3273 
3274 			ret = srp_create_ch_ib(ch);
3275 			if (ret)
3276 				goto err_disconnect;
3277 
3278 			ret = srp_alloc_req_data(ch);
3279 			if (ret)
3280 				goto err_disconnect;
3281 
3282 			ret = srp_connect_ch(ch, multich);
3283 			if (ret) {
3284 				shost_printk(KERN_ERR, target->scsi_host,
3285 					     PFX "Connection %d/%d failed\n",
3286 					     ch_start + cpu_idx,
3287 					     target->ch_count);
3288 				if (node_idx == 0 && cpu_idx == 0) {
3289 					goto err_disconnect;
3290 				} else {
3291 					srp_free_ch_ib(target, ch);
3292 					srp_free_req_data(target, ch);
3293 					target->ch_count = ch - target->ch;
3294 					goto connected;
3295 				}
3296 			}
3297 
3298 			multich = true;
3299 			cpu_idx++;
3300 		}
3301 		node_idx++;
3302 	}
3303 
3304 connected:
3305 	target->scsi_host->nr_hw_queues = target->ch_count;
3306 
3307 	ret = srp_add_target(host, target);
3308 	if (ret)
3309 		goto err_disconnect;
3310 
3311 	if (target->state != SRP_TARGET_REMOVED) {
3312 		shost_printk(KERN_DEBUG, target->scsi_host, PFX
3313 			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3314 			     be64_to_cpu(target->id_ext),
3315 			     be64_to_cpu(target->ioc_guid),
3316 			     be16_to_cpu(target->pkey),
3317 			     be64_to_cpu(target->service_id),
3318 			     target->sgid.raw, target->orig_dgid.raw);
3319 	}
3320 
3321 	ret = count;
3322 
3323 out:
3324 	mutex_unlock(&host->add_target_mutex);
3325 
3326 	scsi_host_put(target->scsi_host);
3327 	if (ret < 0)
3328 		scsi_host_put(target->scsi_host);
3329 
3330 	return ret;
3331 
3332 err_disconnect:
3333 	srp_disconnect_target(target);
3334 
3335 	for (i = 0; i < target->ch_count; i++) {
3336 		ch = &target->ch[i];
3337 		srp_free_ch_ib(target, ch);
3338 		srp_free_req_data(target, ch);
3339 	}
3340 
3341 	kfree(target->ch);
3342 	goto out;
3343 }
3344 
3345 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3346 
3347 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3348 			  char *buf)
3349 {
3350 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3351 
3352 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3353 }
3354 
3355 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3356 
3357 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3358 			 char *buf)
3359 {
3360 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3361 
3362 	return sprintf(buf, "%d\n", host->port);
3363 }
3364 
3365 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3366 
3367 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3368 {
3369 	struct srp_host *host;
3370 
3371 	host = kzalloc(sizeof *host, GFP_KERNEL);
3372 	if (!host)
3373 		return NULL;
3374 
3375 	INIT_LIST_HEAD(&host->target_list);
3376 	spin_lock_init(&host->target_lock);
3377 	init_completion(&host->released);
3378 	mutex_init(&host->add_target_mutex);
3379 	host->srp_dev = device;
3380 	host->port = port;
3381 
3382 	host->dev.class = &srp_class;
3383 	host->dev.parent = device->dev->dma_device;
3384 	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3385 
3386 	if (device_register(&host->dev))
3387 		goto free_host;
3388 	if (device_create_file(&host->dev, &dev_attr_add_target))
3389 		goto err_class;
3390 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3391 		goto err_class;
3392 	if (device_create_file(&host->dev, &dev_attr_port))
3393 		goto err_class;
3394 
3395 	return host;
3396 
3397 err_class:
3398 	device_unregister(&host->dev);
3399 
3400 free_host:
3401 	kfree(host);
3402 
3403 	return NULL;
3404 }
3405 
3406 static void srp_add_one(struct ib_device *device)
3407 {
3408 	struct srp_device *srp_dev;
3409 	struct srp_host *host;
3410 	int mr_page_shift, p;
3411 	u64 max_pages_per_mr;
3412 
3413 	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3414 	if (!srp_dev)
3415 		return;
3416 
3417 	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3418 			    device->map_phys_fmr && device->unmap_fmr);
3419 	srp_dev->has_fr = (device->attrs.device_cap_flags &
3420 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
3421 	if (!srp_dev->has_fmr && !srp_dev->has_fr)
3422 		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3423 
3424 	srp_dev->use_fast_reg = (srp_dev->has_fr &&
3425 				 (!srp_dev->has_fmr || prefer_fr));
3426 	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3427 
3428 	/*
3429 	 * Use the smallest page size supported by the HCA, down to a
3430 	 * minimum of 4096 bytes. We're unlikely to build large sglists
3431 	 * out of smaller entries.
3432 	 */
3433 	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
3434 	srp_dev->mr_page_size	= 1 << mr_page_shift;
3435 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
3436 	max_pages_per_mr	= device->attrs.max_mr_size;
3437 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
3438 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3439 					  max_pages_per_mr);
3440 	if (srp_dev->use_fast_reg) {
3441 		srp_dev->max_pages_per_mr =
3442 			min_t(u32, srp_dev->max_pages_per_mr,
3443 			      device->attrs.max_fast_reg_page_list_len);
3444 	}
3445 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
3446 				   srp_dev->max_pages_per_mr;
3447 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3448 		 device->name, mr_page_shift, device->attrs.max_mr_size,
3449 		 device->attrs.max_fast_reg_page_list_len,
3450 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3451 
3452 	INIT_LIST_HEAD(&srp_dev->dev_list);
3453 
3454 	srp_dev->dev = device;
3455 	srp_dev->pd  = ib_alloc_pd(device);
3456 	if (IS_ERR(srp_dev->pd))
3457 		goto free_dev;
3458 
3459 	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3460 		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3461 						   IB_ACCESS_LOCAL_WRITE |
3462 						   IB_ACCESS_REMOTE_READ |
3463 						   IB_ACCESS_REMOTE_WRITE);
3464 		if (IS_ERR(srp_dev->global_mr))
3465 			goto err_pd;
3466 	} else {
3467 		srp_dev->global_mr = NULL;
3468 	}
3469 
3470 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3471 		host = srp_add_port(srp_dev, p);
3472 		if (host)
3473 			list_add_tail(&host->list, &srp_dev->dev_list);
3474 	}
3475 
3476 	ib_set_client_data(device, &srp_client, srp_dev);
3477 	return;
3478 
3479 err_pd:
3480 	ib_dealloc_pd(srp_dev->pd);
3481 
3482 free_dev:
3483 	kfree(srp_dev);
3484 }
3485 
3486 static void srp_remove_one(struct ib_device *device, void *client_data)
3487 {
3488 	struct srp_device *srp_dev;
3489 	struct srp_host *host, *tmp_host;
3490 	struct srp_target_port *target;
3491 
3492 	srp_dev = client_data;
3493 	if (!srp_dev)
3494 		return;
3495 
3496 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3497 		device_unregister(&host->dev);
3498 		/*
3499 		 * Wait for the sysfs entry to go away, so that no new
3500 		 * target ports can be created.
3501 		 */
3502 		wait_for_completion(&host->released);
3503 
3504 		/*
3505 		 * Remove all target ports.
3506 		 */
3507 		spin_lock(&host->target_lock);
3508 		list_for_each_entry(target, &host->target_list, list)
3509 			srp_queue_remove_work(target);
3510 		spin_unlock(&host->target_lock);
3511 
3512 		/*
3513 		 * Wait for tl_err and target port removal tasks.
3514 		 */
3515 		flush_workqueue(system_long_wq);
3516 		flush_workqueue(srp_remove_wq);
3517 
3518 		kfree(host);
3519 	}
3520 
3521 	if (srp_dev->global_mr)
3522 		ib_dereg_mr(srp_dev->global_mr);
3523 	ib_dealloc_pd(srp_dev->pd);
3524 
3525 	kfree(srp_dev);
3526 }
3527 
3528 static struct srp_function_template ib_srp_transport_functions = {
3529 	.has_rport_state	 = true,
3530 	.reset_timer_if_blocked	 = true,
3531 	.reconnect_delay	 = &srp_reconnect_delay,
3532 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
3533 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
3534 	.reconnect		 = srp_rport_reconnect,
3535 	.rport_delete		 = srp_rport_delete,
3536 	.terminate_rport_io	 = srp_terminate_io,
3537 };
3538 
3539 static int __init srp_init_module(void)
3540 {
3541 	int ret;
3542 
3543 	if (srp_sg_tablesize) {
3544 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3545 		if (!cmd_sg_entries)
3546 			cmd_sg_entries = srp_sg_tablesize;
3547 	}
3548 
3549 	if (!cmd_sg_entries)
3550 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3551 
3552 	if (cmd_sg_entries > 255) {
3553 		pr_warn("Clamping cmd_sg_entries to 255\n");
3554 		cmd_sg_entries = 255;
3555 	}
3556 
3557 	if (!indirect_sg_entries)
3558 		indirect_sg_entries = cmd_sg_entries;
3559 	else if (indirect_sg_entries < cmd_sg_entries) {
3560 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3561 			cmd_sg_entries);
3562 		indirect_sg_entries = cmd_sg_entries;
3563 	}
3564 
3565 	srp_remove_wq = create_workqueue("srp_remove");
3566 	if (!srp_remove_wq) {
3567 		ret = -ENOMEM;
3568 		goto out;
3569 	}
3570 
3571 	ret = -ENOMEM;
3572 	ib_srp_transport_template =
3573 		srp_attach_transport(&ib_srp_transport_functions);
3574 	if (!ib_srp_transport_template)
3575 		goto destroy_wq;
3576 
3577 	ret = class_register(&srp_class);
3578 	if (ret) {
3579 		pr_err("couldn't register class infiniband_srp\n");
3580 		goto release_tr;
3581 	}
3582 
3583 	ib_sa_register_client(&srp_sa_client);
3584 
3585 	ret = ib_register_client(&srp_client);
3586 	if (ret) {
3587 		pr_err("couldn't register IB client\n");
3588 		goto unreg_sa;
3589 	}
3590 
3591 out:
3592 	return ret;
3593 
3594 unreg_sa:
3595 	ib_sa_unregister_client(&srp_sa_client);
3596 	class_unregister(&srp_class);
3597 
3598 release_tr:
3599 	srp_release_transport(ib_srp_transport_template);
3600 
3601 destroy_wq:
3602 	destroy_workqueue(srp_remove_wq);
3603 	goto out;
3604 }
3605 
3606 static void __exit srp_cleanup_module(void)
3607 {
3608 	ib_unregister_client(&srp_client);
3609 	ib_sa_unregister_client(&srp_sa_client);
3610 	class_unregister(&srp_class);
3611 	srp_release_transport(ib_srp_transport_template);
3612 	destroy_workqueue(srp_remove_wq);
3613 }
3614 
3615 module_init(srp_init_module);
3616 module_exit(srp_cleanup_module);
3617