1 /*
2  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <rdma/ib_cache.h>
44 
45 #include <linux/atomic.h>
46 
47 #include <scsi/scsi.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_dbg.h>
50 #include <scsi/scsi_tcq.h>
51 #include <scsi/srp.h>
52 #include <scsi/scsi_transport_srp.h>
53 
54 #include "ib_srp.h"
55 
56 #define DRV_NAME	"ib_srp"
57 #define PFX		DRV_NAME ": "
58 #define DRV_VERSION	"1.0"
59 #define DRV_RELDATE	"July 1, 2013"
60 
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
63 		   "v" DRV_VERSION " (" DRV_RELDATE ")");
64 MODULE_LICENSE("Dual BSD/GPL");
65 
66 static unsigned int srp_sg_tablesize;
67 static unsigned int cmd_sg_entries;
68 static unsigned int indirect_sg_entries;
69 static bool allow_ext_sg;
70 static bool prefer_fr;
71 static bool register_always;
72 static int topspin_workarounds = 1;
73 
74 module_param(srp_sg_tablesize, uint, 0444);
75 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
76 
77 module_param(cmd_sg_entries, uint, 0444);
78 MODULE_PARM_DESC(cmd_sg_entries,
79 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
80 
81 module_param(indirect_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(indirect_sg_entries,
83 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
84 
85 module_param(allow_ext_sg, bool, 0444);
86 MODULE_PARM_DESC(allow_ext_sg,
87 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
88 
89 module_param(topspin_workarounds, int, 0444);
90 MODULE_PARM_DESC(topspin_workarounds,
91 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
92 
93 module_param(prefer_fr, bool, 0444);
94 MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");
96 
97 module_param(register_always, bool, 0444);
98 MODULE_PARM_DESC(register_always,
99 		 "Use memory registration even for contiguous memory regions");
100 
101 static struct kernel_param_ops srp_tmo_ops;
102 
103 static int srp_reconnect_delay = 10;
104 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
105 		S_IRUGO | S_IWUSR);
106 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
107 
108 static int srp_fast_io_fail_tmo = 15;
109 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
110 		S_IRUGO | S_IWUSR);
111 MODULE_PARM_DESC(fast_io_fail_tmo,
112 		 "Number of seconds between the observation of a transport"
113 		 " layer error and failing all I/O. \"off\" means that this"
114 		 " functionality is disabled.");
115 
116 static int srp_dev_loss_tmo = 600;
117 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
118 		S_IRUGO | S_IWUSR);
119 MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate against transport layer errors. After this time has"
		 " been exceeded the SCSI host is removed. Should be"
123 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
124 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
125 		 " this functionality is disabled.");
126 
127 static unsigned ch_count;
128 module_param(ch_count, uint, 0444);
129 MODULE_PARM_DESC(ch_count,
130 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
131 
132 static void srp_add_one(struct ib_device *device);
133 static void srp_remove_one(struct ib_device *device);
134 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
135 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
136 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
137 
138 static struct scsi_transport_template *ib_srp_transport_template;
139 static struct workqueue_struct *srp_remove_wq;
140 
141 static struct ib_client srp_client = {
142 	.name   = "srp",
143 	.add    = srp_add_one,
144 	.remove = srp_remove_one
145 };
146 
147 static struct ib_sa_client srp_sa_client;
148 
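/**
 * srp_tmo_get() - show an SRP timeout module parameter
 * @buffer: Where to format the parameter value.
 * @kp:     Kernel parameter being read.
 *
 * Negative timeout values are reported as "off".
 */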
149 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
150 {
151 	int tmo = *(int *)kp->arg;
152 
153 	if (tmo >= 0)
154 		return sprintf(buffer, "%d", tmo);
155 	else
156 		return sprintf(buffer, "off");
157 }
158 
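/**
 * srp_tmo_set() - store an SRP timeout module parameter
 * @val: New value; "off" is translated into -1 (disabled).
 * @kp:  Kernel parameter being written.
 *
 * The new value is only accepted if the resulting combination of
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo passes srp_tmo_valid().
 */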
159 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
160 {
161 	int tmo, res;
162 
163 	if (strncmp(val, "off", 3) != 0) {
164 		res = kstrtoint(val, 0, &tmo);
165 		if (res)
166 			goto out;
167 	} else {
168 		tmo = -1;
169 	}
170 	if (kp->arg == &srp_reconnect_delay)
171 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
172 				    srp_dev_loss_tmo);
173 	else if (kp->arg == &srp_fast_io_fail_tmo)
174 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
175 	else
176 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
177 				    tmo);
178 	if (res)
179 		goto out;
180 	*(int *)kp->arg = tmo;
181 
182 out:
183 	return res;
184 }
185 
186 static struct kernel_param_ops srp_tmo_ops = {
187 	.get = srp_tmo_get,
188 	.set = srp_tmo_set,
189 };
190 
191 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
192 {
193 	return (struct srp_target_port *) host->hostdata;
194 }
195 
196 static const char *srp_target_info(struct Scsi_Host *host)
197 {
198 	return host_to_target(host)->target_name;
199 }
200 
201 static int srp_target_is_topspin(struct srp_target_port *target)
202 {
203 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
204 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
205 
206 	return topspin_workarounds &&
207 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
208 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
209 }
210 
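/*
 * Allocate an information unit of @size bytes and map its buffer for DMA in
 * the given direction. Returns NULL on allocation or mapping failure.
 */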
211 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
212 				   gfp_t gfp_mask,
213 				   enum dma_data_direction direction)
214 {
215 	struct srp_iu *iu;
216 
217 	iu = kmalloc(sizeof *iu, gfp_mask);
218 	if (!iu)
219 		goto out;
220 
221 	iu->buf = kzalloc(size, gfp_mask);
222 	if (!iu->buf)
223 		goto out_free_iu;
224 
225 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
226 				    direction);
227 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
228 		goto out_free_buf;
229 
230 	iu->size      = size;
231 	iu->direction = direction;
232 
233 	return iu;
234 
235 out_free_buf:
236 	kfree(iu->buf);
237 out_free_iu:
238 	kfree(iu);
239 out:
240 	return NULL;
241 }
242 
243 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
244 {
245 	if (!iu)
246 		return;
247 
248 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
249 			    iu->direction);
250 	kfree(iu->buf);
251 	kfree(iu);
252 }
253 
254 static void srp_qp_event(struct ib_event *event, void *context)
255 {
256 	pr_debug("QP event %d\n", event->event);
257 }
258 
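/*
 * Transition a newly created queue pair into the INIT state and configure
 * its P_Key index, access flags and port number.
 */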
259 static int srp_init_qp(struct srp_target_port *target,
260 		       struct ib_qp *qp)
261 {
262 	struct ib_qp_attr *attr;
263 	int ret;
264 
265 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
266 	if (!attr)
267 		return -ENOMEM;
268 
269 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
270 				  target->srp_host->port,
271 				  be16_to_cpu(target->pkey),
272 				  &attr->pkey_index);
273 	if (ret)
274 		goto out;
275 
276 	attr->qp_state        = IB_QPS_INIT;
277 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
278 				    IB_ACCESS_REMOTE_WRITE);
279 	attr->port_num        = target->srp_host->port;
280 
281 	ret = ib_modify_qp(qp, attr,
282 			   IB_QP_STATE		|
283 			   IB_QP_PKEY_INDEX	|
284 			   IB_QP_ACCESS_FLAGS	|
285 			   IB_QP_PORT);
286 
287 out:
288 	kfree(attr);
289 	return ret;
290 }
291 
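/*
 * Allocate a new CM ID for @ch, destroying any previous one, and
 * reinitialize the path record fields used for the next connection attempt.
 */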
292 static int srp_new_cm_id(struct srp_rdma_ch *ch)
293 {
294 	struct srp_target_port *target = ch->target;
295 	struct ib_cm_id *new_cm_id;
296 
297 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
298 				    srp_cm_handler, ch);
299 	if (IS_ERR(new_cm_id))
300 		return PTR_ERR(new_cm_id);
301 
302 	if (ch->cm_id)
303 		ib_destroy_cm_id(ch->cm_id);
304 	ch->cm_id = new_cm_id;
305 	ch->path.sgid = target->sgid;
306 	ch->path.dgid = target->orig_dgid;
307 	ch->path.pkey = target->pkey;
308 	ch->path.service_id = target->service_id;
309 
310 	return 0;
311 }
312 
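/**
 * srp_alloc_fmr_pool() - allocate an FMR pool for a target port
 * @target: SRP target port.
 *
 * The pool size is derived from the can_queue value of the SCSI host.
 */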
313 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
314 {
315 	struct srp_device *dev = target->srp_host->srp_dev;
316 	struct ib_fmr_pool_param fmr_param;
317 
318 	memset(&fmr_param, 0, sizeof(fmr_param));
319 	fmr_param.pool_size	    = target->scsi_host->can_queue;
320 	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
321 	fmr_param.cache		    = 1;
322 	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
323 	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
324 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
325 				       IB_ACCESS_REMOTE_WRITE |
326 				       IB_ACCESS_REMOTE_READ);
327 
328 	return ib_create_fmr_pool(dev->pd, &fmr_param);
329 }
330 
331 /**
332  * srp_destroy_fr_pool() - free the resources owned by a pool
333  * @pool: Fast registration pool to be destroyed.
334  */
335 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
336 {
337 	int i;
338 	struct srp_fr_desc *d;
339 
340 	if (!pool)
341 		return;
342 
343 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
344 		if (d->frpl)
345 			ib_free_fast_reg_page_list(d->frpl);
346 		if (d->mr)
347 			ib_dereg_mr(d->mr);
348 	}
349 	kfree(pool);
350 }
351 
352 /**
353  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
354  * @device:            IB device to allocate fast registration descriptors for.
355  * @pd:                Protection domain associated with the FR descriptors.
356  * @pool_size:         Number of descriptors to allocate.
357  * @max_page_list_len: Maximum fast registration work request page list length.
358  */
359 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
360 					      struct ib_pd *pd, int pool_size,
361 					      int max_page_list_len)
362 {
363 	struct srp_fr_pool *pool;
364 	struct srp_fr_desc *d;
365 	struct ib_mr *mr;
366 	struct ib_fast_reg_page_list *frpl;
367 	int i, ret = -EINVAL;
368 
369 	if (pool_size <= 0)
370 		goto err;
371 	ret = -ENOMEM;
372 	pool = kzalloc(sizeof(struct srp_fr_pool) +
373 		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
374 	if (!pool)
375 		goto err;
376 	pool->size = pool_size;
377 	pool->max_page_list_len = max_page_list_len;
378 	spin_lock_init(&pool->lock);
379 	INIT_LIST_HEAD(&pool->free_list);
380 
381 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
382 		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
383 		if (IS_ERR(mr)) {
384 			ret = PTR_ERR(mr);
385 			goto destroy_pool;
386 		}
387 		d->mr = mr;
388 		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
389 		if (IS_ERR(frpl)) {
390 			ret = PTR_ERR(frpl);
391 			goto destroy_pool;
392 		}
393 		d->frpl = frpl;
394 		list_add_tail(&d->entry, &pool->free_list);
395 	}
396 
397 out:
398 	return pool;
399 
400 destroy_pool:
401 	srp_destroy_fr_pool(pool);
402 
403 err:
404 	pool = ERR_PTR(ret);
405 	goto out;
406 }
407 
408 /**
409  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
410  * @pool: Pool to obtain descriptor from.
411  */
412 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
413 {
414 	struct srp_fr_desc *d = NULL;
415 	unsigned long flags;
416 
417 	spin_lock_irqsave(&pool->lock, flags);
418 	if (!list_empty(&pool->free_list)) {
419 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
420 		list_del(&d->entry);
421 	}
422 	spin_unlock_irqrestore(&pool->lock, flags);
423 
424 	return d;
425 }
426 
427 /**
428  * srp_fr_pool_put() - put an FR descriptor back in the free list
429  * @pool: Pool the descriptor was allocated from.
430  * @desc: Pointer to an array of fast registration descriptor pointers.
431  * @n:    Number of descriptors to put back.
432  *
433  * Note: The caller must already have queued an invalidation request for
434  * desc->mr->rkey before calling this function.
435  */
436 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
437 			    int n)
438 {
439 	unsigned long flags;
440 	int i;
441 
442 	spin_lock_irqsave(&pool->lock, flags);
443 	for (i = 0; i < n; i++)
444 		list_add(&desc[i]->entry, &pool->free_list);
445 	spin_unlock_irqrestore(&pool->lock, flags);
446 }
447 
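/**
 * srp_alloc_fr_pool() - allocate a fast registration pool for a target port
 * @target: SRP target port.
 */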
448 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
449 {
450 	struct srp_device *dev = target->srp_host->srp_dev;
451 
452 	return srp_create_fr_pool(dev->dev, dev->pd,
453 				  target->scsi_host->can_queue,
454 				  dev->max_pages_per_mr);
455 }
456 
457 /**
458  * srp_destroy_qp() - destroy an RDMA queue pair
459  * @ch: SRP RDMA channel.
460  *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
465  */
466 static void srp_destroy_qp(struct srp_rdma_ch *ch)
467 {
468 	struct srp_target_port *target = ch->target;
469 	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
470 	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
471 	struct ib_recv_wr *bad_wr;
472 	int ret;
473 
474 	/* Destroying a QP and reusing ch->done is only safe if not connected */
475 	WARN_ON_ONCE(target->connected);
476 
477 	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
479 	if (ret)
480 		goto out;
481 
482 	init_completion(&ch->done);
483 	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
484 	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
485 	if (ret == 0)
486 		wait_for_completion(&ch->done);
487 
488 out:
489 	ib_destroy_qp(ch->qp);
490 }
491 
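/*
 * Create the IB resources of an RDMA channel: receive and send completion
 * queues, a queue pair and, depending on the HCA capabilities, either a fast
 * registration pool or an FMR pool. Resources left over from a previous
 * invocation are destroyed only after their replacements have been created.
 */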
492 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
493 {
494 	struct srp_target_port *target = ch->target;
495 	struct srp_device *dev = target->srp_host->srp_dev;
496 	struct ib_qp_init_attr *init_attr;
497 	struct ib_cq *recv_cq, *send_cq;
498 	struct ib_qp *qp;
499 	struct ib_fmr_pool *fmr_pool = NULL;
500 	struct srp_fr_pool *fr_pool = NULL;
501 	const int m = 1 + dev->use_fast_reg;
502 	int ret;
503 
504 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
505 	if (!init_attr)
506 		return -ENOMEM;
507 
508 	/* + 1 for SRP_LAST_WR_ID */
509 	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
510 			       target->queue_size + 1, ch->comp_vector);
511 	if (IS_ERR(recv_cq)) {
512 		ret = PTR_ERR(recv_cq);
513 		goto err;
514 	}
515 
516 	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
517 			       m * target->queue_size, ch->comp_vector);
518 	if (IS_ERR(send_cq)) {
519 		ret = PTR_ERR(send_cq);
520 		goto err_recv_cq;
521 	}
522 
523 	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
524 
525 	init_attr->event_handler       = srp_qp_event;
526 	init_attr->cap.max_send_wr     = m * target->queue_size;
527 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
528 	init_attr->cap.max_recv_sge    = 1;
529 	init_attr->cap.max_send_sge    = 1;
530 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
531 	init_attr->qp_type             = IB_QPT_RC;
532 	init_attr->send_cq             = send_cq;
533 	init_attr->recv_cq             = recv_cq;
534 
535 	qp = ib_create_qp(dev->pd, init_attr);
536 	if (IS_ERR(qp)) {
537 		ret = PTR_ERR(qp);
538 		goto err_send_cq;
539 	}
540 
541 	ret = srp_init_qp(target, qp);
542 	if (ret)
543 		goto err_qp;
544 
545 	if (dev->use_fast_reg && dev->has_fr) {
546 		fr_pool = srp_alloc_fr_pool(target);
547 		if (IS_ERR(fr_pool)) {
548 			ret = PTR_ERR(fr_pool);
549 			shost_printk(KERN_WARNING, target->scsi_host, PFX
550 				     "FR pool allocation failed (%d)\n", ret);
551 			goto err_qp;
552 		}
553 		if (ch->fr_pool)
554 			srp_destroy_fr_pool(ch->fr_pool);
555 		ch->fr_pool = fr_pool;
556 	} else if (!dev->use_fast_reg && dev->has_fmr) {
557 		fmr_pool = srp_alloc_fmr_pool(target);
558 		if (IS_ERR(fmr_pool)) {
559 			ret = PTR_ERR(fmr_pool);
560 			shost_printk(KERN_WARNING, target->scsi_host, PFX
561 				     "FMR pool allocation failed (%d)\n", ret);
562 			goto err_qp;
563 		}
564 		if (ch->fmr_pool)
565 			ib_destroy_fmr_pool(ch->fmr_pool);
566 		ch->fmr_pool = fmr_pool;
567 	}
568 
569 	if (ch->qp)
570 		srp_destroy_qp(ch);
571 	if (ch->recv_cq)
572 		ib_destroy_cq(ch->recv_cq);
573 	if (ch->send_cq)
574 		ib_destroy_cq(ch->send_cq);
575 
576 	ch->qp = qp;
577 	ch->recv_cq = recv_cq;
578 	ch->send_cq = send_cq;
579 
580 	kfree(init_attr);
581 	return 0;
582 
583 err_qp:
584 	ib_destroy_qp(qp);
585 
586 err_send_cq:
587 	ib_destroy_cq(send_cq);
588 
589 err_recv_cq:
590 	ib_destroy_cq(recv_cq);
591 
592 err:
593 	kfree(init_attr);
594 	return ret;
595 }
596 
597 /*
598  * Note: this function may be called without srp_alloc_iu_bufs() having been
599  * invoked. Hence the ch->[rt]x_ring checks.
600  */
601 static void srp_free_ch_ib(struct srp_target_port *target,
602 			   struct srp_rdma_ch *ch)
603 {
604 	struct srp_device *dev = target->srp_host->srp_dev;
605 	int i;
606 
607 	if (!ch->target)
608 		return;
609 
610 	if (ch->cm_id) {
611 		ib_destroy_cm_id(ch->cm_id);
612 		ch->cm_id = NULL;
613 	}
614 
	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
616 	if (!ch->qp)
617 		return;
618 
619 	if (dev->use_fast_reg) {
620 		if (ch->fr_pool)
621 			srp_destroy_fr_pool(ch->fr_pool);
622 	} else {
623 		if (ch->fmr_pool)
624 			ib_destroy_fmr_pool(ch->fmr_pool);
625 	}
626 	srp_destroy_qp(ch);
627 	ib_destroy_cq(ch->send_cq);
628 	ib_destroy_cq(ch->recv_cq);
629 
	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the SCSI error handler may continue to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
636 	ch->target = NULL;
637 
638 	ch->qp = NULL;
639 	ch->send_cq = ch->recv_cq = NULL;
640 
641 	if (ch->rx_ring) {
642 		for (i = 0; i < target->queue_size; ++i)
643 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
644 		kfree(ch->rx_ring);
645 		ch->rx_ring = NULL;
646 	}
647 	if (ch->tx_ring) {
648 		for (i = 0; i < target->queue_size; ++i)
649 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
650 		kfree(ch->tx_ring);
651 		ch->tx_ring = NULL;
652 	}
653 }
654 
655 static void srp_path_rec_completion(int status,
656 				    struct ib_sa_path_rec *pathrec,
657 				    void *ch_ptr)
658 {
659 	struct srp_rdma_ch *ch = ch_ptr;
660 	struct srp_target_port *target = ch->target;
661 
662 	ch->status = status;
663 	if (status)
664 		shost_printk(KERN_ERR, target->scsi_host,
665 			     PFX "Got failed path rec status %d\n", status);
666 	else
667 		ch->path = *pathrec;
668 	complete(&ch->done);
669 }
670 
671 static int srp_lookup_path(struct srp_rdma_ch *ch)
672 {
673 	struct srp_target_port *target = ch->target;
674 	int ret;
675 
676 	ch->path.numb_path = 1;
677 
678 	init_completion(&ch->done);
679 
680 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
681 					       target->srp_host->srp_dev->dev,
682 					       target->srp_host->port,
683 					       &ch->path,
684 					       IB_SA_PATH_REC_SERVICE_ID |
685 					       IB_SA_PATH_REC_DGID	 |
686 					       IB_SA_PATH_REC_SGID	 |
687 					       IB_SA_PATH_REC_NUMB_PATH	 |
688 					       IB_SA_PATH_REC_PKEY,
689 					       SRP_PATH_REC_TIMEOUT_MS,
690 					       GFP_KERNEL,
691 					       srp_path_rec_completion,
692 					       ch, &ch->path_query);
693 	if (ch->path_query_id < 0)
694 		return ch->path_query_id;
695 
696 	ret = wait_for_completion_interruptible(&ch->done);
697 	if (ret < 0)
698 		return ret;
699 
700 	if (ch->status < 0)
701 		shost_printk(KERN_WARNING, target->scsi_host,
702 			     PFX "Path record query failed\n");
703 
704 	return ch->status;
705 }
706 
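/*
 * Build and send an SRP_LOGIN_REQ through the connection manager. The
 * initiator and target port identifiers are laid out according to the I/O
 * class reported by the target (see the comment about obsolete SRP drafts
 * below).
 */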
707 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
708 {
709 	struct srp_target_port *target = ch->target;
710 	struct {
711 		struct ib_cm_req_param param;
712 		struct srp_login_req   priv;
713 	} *req = NULL;
714 	int status;
715 
716 	req = kzalloc(sizeof *req, GFP_KERNEL);
717 	if (!req)
718 		return -ENOMEM;
719 
720 	req->param.primary_path		      = &ch->path;
721 	req->param.alternate_path 	      = NULL;
722 	req->param.service_id 		      = target->service_id;
723 	req->param.qp_num		      = ch->qp->qp_num;
724 	req->param.qp_type		      = ch->qp->qp_type;
725 	req->param.private_data 	      = &req->priv;
726 	req->param.private_data_len 	      = sizeof req->priv;
727 	req->param.flow_control 	      = 1;
728 
729 	get_random_bytes(&req->param.starting_psn, 4);
730 	req->param.starting_psn 	     &= 0xffffff;
731 
732 	/*
733 	 * Pick some arbitrary defaults here; we could make these
734 	 * module parameters if anyone cared about setting them.
735 	 */
736 	req->param.responder_resources	      = 4;
737 	req->param.remote_cm_response_timeout = 20;
738 	req->param.local_cm_response_timeout  = 20;
739 	req->param.retry_count                = target->tl_retry_count;
740 	req->param.rnr_retry_count 	      = 7;
741 	req->param.max_cm_retries 	      = 15;
742 
743 	req->priv.opcode     	= SRP_LOGIN_REQ;
744 	req->priv.tag        	= 0;
745 	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
746 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
747 					      SRP_BUF_FORMAT_INDIRECT);
748 	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
749 				   SRP_MULTICHAN_SINGLE);
750 	/*
751 	 * In the published SRP specification (draft rev. 16a), the
752 	 * port identifier format is 8 bytes of ID extension followed
753 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
754 	 * opposite order, so that the GUID comes first.
755 	 *
756 	 * Targets conforming to these obsolete drafts can be
757 	 * recognized by the I/O Class they report.
758 	 */
759 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
760 		memcpy(req->priv.initiator_port_id,
761 		       &target->sgid.global.interface_id, 8);
762 		memcpy(req->priv.initiator_port_id + 8,
763 		       &target->initiator_ext, 8);
764 		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
765 		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
766 	} else {
767 		memcpy(req->priv.initiator_port_id,
768 		       &target->initiator_ext, 8);
769 		memcpy(req->priv.initiator_port_id + 8,
770 		       &target->sgid.global.interface_id, 8);
771 		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
772 		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
773 	}
774 
775 	/*
776 	 * Topspin/Cisco SRP targets will reject our login unless we
777 	 * zero out the first 8 bytes of our initiator port ID and set
778 	 * the second 8 bytes to the local node GUID.
779 	 */
780 	if (srp_target_is_topspin(target)) {
781 		shost_printk(KERN_DEBUG, target->scsi_host,
782 			     PFX "Topspin/Cisco initiator port ID workaround "
783 			     "activated for target GUID %016llx\n",
784 			     (unsigned long long) be64_to_cpu(target->ioc_guid));
785 		memset(req->priv.initiator_port_id, 0, 8);
786 		memcpy(req->priv.initiator_port_id + 8,
787 		       &target->srp_host->srp_dev->dev->node_guid, 8);
788 	}
789 
790 	status = ib_send_cm_req(ch->cm_id, &req->param);
791 
792 	kfree(req);
793 
794 	return status;
795 }
796 
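/*
 * Transition a target to the SRP_TARGET_REMOVED state and, if it was not
 * already in that state, queue the removal work. Returns whether the state
 * has been changed.
 */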
797 static bool srp_queue_remove_work(struct srp_target_port *target)
798 {
799 	bool changed = false;
800 
801 	spin_lock_irq(&target->lock);
802 	if (target->state != SRP_TARGET_REMOVED) {
803 		target->state = SRP_TARGET_REMOVED;
804 		changed = true;
805 	}
806 	spin_unlock_irq(&target->lock);
807 
808 	if (changed)
809 		queue_work(srp_remove_wq, &target->remove_work);
810 
811 	return changed;
812 }
813 
814 static bool srp_change_conn_state(struct srp_target_port *target,
815 				  bool connected)
816 {
817 	bool changed = false;
818 
819 	spin_lock_irq(&target->lock);
820 	if (target->connected != connected) {
821 		target->connected = connected;
822 		changed = true;
823 	}
824 	spin_unlock_irq(&target->lock);
825 
826 	return changed;
827 }
828 
829 static void srp_disconnect_target(struct srp_target_port *target)
830 {
831 	struct srp_rdma_ch *ch;
832 	int i;
833 
834 	if (srp_change_conn_state(target, false)) {
835 		/* XXX should send SRP_I_LOGOUT request */
836 
837 		for (i = 0; i < target->ch_count; i++) {
838 			ch = &target->ch[i];
839 			if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
840 				shost_printk(KERN_DEBUG, target->scsi_host,
841 					     PFX "Sending CM DREQ failed\n");
842 			}
843 		}
844 	}
845 }
846 
847 static void srp_free_req_data(struct srp_target_port *target,
848 			      struct srp_rdma_ch *ch)
849 {
850 	struct srp_device *dev = target->srp_host->srp_dev;
851 	struct ib_device *ibdev = dev->dev;
852 	struct srp_request *req;
853 	int i;
854 
855 	if (!ch->target || !ch->req_ring)
856 		return;
857 
858 	for (i = 0; i < target->req_ring_size; ++i) {
859 		req = &ch->req_ring[i];
860 		if (dev->use_fast_reg)
861 			kfree(req->fr_list);
862 		else
863 			kfree(req->fmr_list);
864 		kfree(req->map_page);
865 		if (req->indirect_dma_addr) {
866 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
867 					    target->indirect_size,
868 					    DMA_TO_DEVICE);
869 		}
870 		kfree(req->indirect_desc);
871 	}
872 
873 	kfree(ch->req_ring);
874 	ch->req_ring = NULL;
875 }
876 
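/*
 * Allocate the request ring of an RDMA channel, including the per-request
 * memory registration lists, page arrays and DMA-mapped indirect descriptor
 * buffers.
 */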
877 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
878 {
879 	struct srp_target_port *target = ch->target;
880 	struct srp_device *srp_dev = target->srp_host->srp_dev;
881 	struct ib_device *ibdev = srp_dev->dev;
882 	struct srp_request *req;
883 	void *mr_list;
884 	dma_addr_t dma_addr;
885 	int i, ret = -ENOMEM;
886 
887 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
888 			       GFP_KERNEL);
889 	if (!ch->req_ring)
890 		goto out;
891 
892 	for (i = 0; i < target->req_ring_size; ++i) {
893 		req = &ch->req_ring[i];
894 		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
895 				  GFP_KERNEL);
896 		if (!mr_list)
897 			goto out;
898 		if (srp_dev->use_fast_reg)
899 			req->fr_list = mr_list;
900 		else
901 			req->fmr_list = mr_list;
902 		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
903 					sizeof(void *), GFP_KERNEL);
904 		if (!req->map_page)
905 			goto out;
906 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
907 		if (!req->indirect_desc)
908 			goto out;
909 
910 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
911 					     target->indirect_size,
912 					     DMA_TO_DEVICE);
913 		if (ib_dma_mapping_error(ibdev, dma_addr))
914 			goto out;
915 
916 		req->indirect_dma_addr = dma_addr;
917 	}
918 	ret = 0;
919 
920 out:
921 	return ret;
922 }
923 
924 /**
925  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
926  * @shost: SCSI host whose attributes to remove from sysfs.
927  *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked will be ignored.
930  */
931 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
932 {
933 	struct device_attribute **attr;
934 
935 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
936 		device_remove_file(&shost->shost_dev, *attr);
937 }
938 
939 static void srp_remove_target(struct srp_target_port *target)
940 {
941 	struct srp_rdma_ch *ch;
942 	int i;
943 
944 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
945 
946 	srp_del_scsi_host_attr(target->scsi_host);
947 	srp_rport_get(target->rport);
948 	srp_remove_host(target->scsi_host);
949 	scsi_remove_host(target->scsi_host);
950 	srp_stop_rport_timers(target->rport);
951 	srp_disconnect_target(target);
952 	for (i = 0; i < target->ch_count; i++) {
953 		ch = &target->ch[i];
954 		srp_free_ch_ib(target, ch);
955 	}
956 	cancel_work_sync(&target->tl_err_work);
957 	srp_rport_put(target->rport);
958 	for (i = 0; i < target->ch_count; i++) {
959 		ch = &target->ch[i];
960 		srp_free_req_data(target, ch);
961 	}
962 	kfree(target->ch);
963 	target->ch = NULL;
964 
965 	spin_lock(&target->srp_host->target_lock);
966 	list_del(&target->list);
967 	spin_unlock(&target->srp_host->target_lock);
968 
969 	scsi_host_put(target->scsi_host);
970 }
971 
972 static void srp_remove_work(struct work_struct *work)
973 {
974 	struct srp_target_port *target =
975 		container_of(work, struct srp_target_port, remove_work);
976 
977 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
978 
979 	srp_remove_target(target);
980 }
981 
982 static void srp_rport_delete(struct srp_rport *rport)
983 {
984 	struct srp_target_port *target = rport->lld_data;
985 
986 	srp_queue_remove_work(target);
987 }
988 
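/*
 * Establish a connection for an RDMA channel. Login is retried after a port
 * or DLID redirect rejection until the connection has been established or a
 * fatal status has been received.
 */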
989 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
990 {
991 	struct srp_target_port *target = ch->target;
992 	int ret;
993 
994 	WARN_ON_ONCE(!multich && target->connected);
995 
996 	target->qp_in_error = false;
997 
998 	ret = srp_lookup_path(ch);
999 	if (ret)
1000 		return ret;
1001 
1002 	while (1) {
1003 		init_completion(&ch->done);
1004 		ret = srp_send_req(ch, multich);
1005 		if (ret)
1006 			return ret;
1007 		ret = wait_for_completion_interruptible(&ch->done);
1008 		if (ret < 0)
1009 			return ret;
1010 
1011 		/*
1012 		 * The CM event handling code will set status to
1013 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1014 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1015 		 * redirect REJ back.
1016 		 */
1017 		switch (ch->status) {
1018 		case 0:
1019 			srp_change_conn_state(target, true);
1020 			return 0;
1021 
1022 		case SRP_PORT_REDIRECT:
1023 			ret = srp_lookup_path(ch);
1024 			if (ret)
1025 				return ret;
1026 			break;
1027 
1028 		case SRP_DLID_REDIRECT:
1029 			break;
1030 
1031 		case SRP_STALE_CONN:
1032 			shost_printk(KERN_ERR, target->scsi_host, PFX
1033 				     "giving up on stale connection\n");
1034 			ch->status = -ECONNRESET;
1035 			return ch->status;
1036 
1037 		default:
1038 			return ch->status;
1039 		}
1040 	}
1041 }
1042 
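/*
 * Post a local invalidation work request for @rkey on the queue pair of @ch.
 */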
1043 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
1044 {
1045 	struct ib_send_wr *bad_wr;
1046 	struct ib_send_wr wr = {
1047 		.opcode		    = IB_WR_LOCAL_INV,
1048 		.wr_id		    = LOCAL_INV_WR_ID_MASK,
1049 		.next		    = NULL,
1050 		.num_sge	    = 0,
1051 		.send_flags	    = 0,
1052 		.ex.invalidate_rkey = rkey,
1053 	};
1054 
1055 	return ib_post_send(ch->qp, &wr, &bad_wr);
1056 }
1057 
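/*
 * Undo the memory registrations and the DMA mapping set up by srp_map_data()
 * for a SCSI command.
 */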
1058 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1059 			   struct srp_rdma_ch *ch,
1060 			   struct srp_request *req)
1061 {
1062 	struct srp_target_port *target = ch->target;
1063 	struct srp_device *dev = target->srp_host->srp_dev;
1064 	struct ib_device *ibdev = dev->dev;
1065 	int i, res;
1066 
1067 	if (!scsi_sglist(scmnd) ||
1068 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1069 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1070 		return;
1071 
1072 	if (dev->use_fast_reg) {
1073 		struct srp_fr_desc **pfr;
1074 
1075 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1076 			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
1077 			if (res < 0) {
1078 				shost_printk(KERN_ERR, target->scsi_host, PFX
1079 				  "Queueing INV WR for rkey %#x failed (%d)\n",
1080 				  (*pfr)->mr->rkey, res);
1081 				queue_work(system_long_wq,
1082 					   &target->tl_err_work);
1083 			}
1084 		}
1085 		if (req->nmdesc)
1086 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
1087 					req->nmdesc);
1088 	} else {
1089 		struct ib_pool_fmr **pfmr;
1090 
1091 		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1092 			ib_fmr_pool_unmap(*pfmr);
1093 	}
1094 
1095 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1096 			scmnd->sc_data_direction);
1097 }
1098 
1099 /**
1100  * srp_claim_req - Take ownership of the scmnd associated with a request.
1101  * @ch: SRP RDMA channel.
1102  * @req: SRP request.
1103  * @sdev: If not NULL, only take ownership for this SCSI device.
1104  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1105  *         ownership of @req->scmnd if it equals @scmnd.
1106  *
1107  * Return value:
1108  * Either NULL or a pointer to the SCSI command the caller became owner of.
1109  */
1110 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1111 				       struct srp_request *req,
1112 				       struct scsi_device *sdev,
1113 				       struct scsi_cmnd *scmnd)
1114 {
1115 	unsigned long flags;
1116 
1117 	spin_lock_irqsave(&ch->lock, flags);
1118 	if (req->scmnd &&
1119 	    (!sdev || req->scmnd->device == sdev) &&
1120 	    (!scmnd || req->scmnd == scmnd)) {
1121 		scmnd = req->scmnd;
1122 		req->scmnd = NULL;
1123 	} else {
1124 		scmnd = NULL;
1125 	}
1126 	spin_unlock_irqrestore(&ch->lock, flags);
1127 
1128 	return scmnd;
1129 }
1130 
1131 /**
1132  * srp_free_req() - Unmap data and add request to the free request list.
1133  * @ch:     SRP RDMA channel.
1134  * @req:    Request to be freed.
1135  * @scmnd:  SCSI command associated with @req.
1136  * @req_lim_delta: Amount to be added to @target->req_lim.
1137  */
1138 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1139 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1140 {
1141 	unsigned long flags;
1142 
1143 	srp_unmap_data(scmnd, ch, req);
1144 
1145 	spin_lock_irqsave(&ch->lock, flags);
1146 	ch->req_lim += req_lim_delta;
1147 	spin_unlock_irqrestore(&ch->lock, flags);
1148 }
1149 
1150 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1151 			   struct scsi_device *sdev, int result)
1152 {
1153 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1154 
1155 	if (scmnd) {
1156 		srp_free_req(ch, req, scmnd, 0);
1157 		scmnd->result = result;
1158 		scmnd->scsi_done(scmnd);
1159 	}
1160 }
1161 
1162 static void srp_terminate_io(struct srp_rport *rport)
1163 {
1164 	struct srp_target_port *target = rport->lld_data;
1165 	struct srp_rdma_ch *ch;
1166 	struct Scsi_Host *shost = target->scsi_host;
1167 	struct scsi_device *sdev;
1168 	int i, j;
1169 
1170 	/*
1171 	 * Invoking srp_terminate_io() while srp_queuecommand() is running
1172 	 * is not safe. Hence the warning statement below.
1173 	 */
1174 	shost_for_each_device(sdev, shost)
1175 		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1176 
1177 	for (i = 0; i < target->ch_count; i++) {
1178 		ch = &target->ch[i];
1179 
1180 		for (j = 0; j < target->req_ring_size; ++j) {
1181 			struct srp_request *req = &ch->req_ring[j];
1182 
1183 			srp_finish_req(ch, req, NULL,
1184 				       DID_TRANSPORT_FAILFAST << 16);
1185 		}
1186 	}
1187 }
1188 
1189 /*
1190  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1191  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1192  * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to achieve this is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls to this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
1197  */
1198 static int srp_rport_reconnect(struct srp_rport *rport)
1199 {
1200 	struct srp_target_port *target = rport->lld_data;
1201 	struct srp_rdma_ch *ch;
1202 	int i, j, ret = 0;
1203 	bool multich = false;
1204 
1205 	srp_disconnect_target(target);
1206 
1207 	if (target->state == SRP_TARGET_SCANNING)
1208 		return -ENODEV;
1209 
1210 	/*
1211 	 * Now get a new local CM ID so that we avoid confusing the target in
1212 	 * case things are really fouled up. Doing so also ensures that all CM
1213 	 * callbacks will have finished before a new QP is allocated.
1214 	 */
1215 	for (i = 0; i < target->ch_count; i++) {
1216 		ch = &target->ch[i];
1217 		if (!ch->target)
1218 			break;
1219 		ret += srp_new_cm_id(ch);
1220 	}
1221 	for (i = 0; i < target->ch_count; i++) {
1222 		ch = &target->ch[i];
1223 		if (!ch->target)
1224 			break;
1225 		for (j = 0; j < target->req_ring_size; ++j) {
1226 			struct srp_request *req = &ch->req_ring[j];
1227 
1228 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1229 		}
1230 	}
1231 	for (i = 0; i < target->ch_count; i++) {
1232 		ch = &target->ch[i];
1233 		if (!ch->target)
1234 			break;
1235 		/*
1236 		 * Whether or not creating a new CM ID succeeded, create a new
1237 		 * QP. This guarantees that all completion callback function
1238 		 * invocations have finished before request resetting starts.
1239 		 */
1240 		ret += srp_create_ch_ib(ch);
1241 
1242 		INIT_LIST_HEAD(&ch->free_tx);
1243 		for (j = 0; j < target->queue_size; ++j)
1244 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1245 	}
1246 	for (i = 0; i < target->ch_count; i++) {
1247 		ch = &target->ch[i];
1248 		if (ret || !ch->target) {
1249 			if (i > 1)
1250 				ret = 0;
1251 			break;
1252 		}
1253 		ret = srp_connect_ch(ch, multich);
1254 		multich = true;
1255 	}
1256 
1257 	if (ret == 0)
1258 		shost_printk(KERN_INFO, target->scsi_host,
1259 			     PFX "reconnect succeeded\n");
1260 
1261 	return ret;
1262 }
1263 
1264 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1265 			 unsigned int dma_len, u32 rkey)
1266 {
1267 	struct srp_direct_buf *desc = state->desc;
1268 
1269 	desc->va = cpu_to_be64(dma_addr);
1270 	desc->key = cpu_to_be32(rkey);
1271 	desc->len = cpu_to_be32(dma_len);
1272 
1273 	state->total_len += dma_len;
1274 	state->desc++;
1275 	state->ndesc++;
1276 }
1277 
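/*
 * Map the pages gathered in @state through the FMR pool and add a single
 * memory descriptor for the resulting rkey.
 */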
1278 static int srp_map_finish_fmr(struct srp_map_state *state,
1279 			      struct srp_rdma_ch *ch)
1280 {
1281 	struct ib_pool_fmr *fmr;
1282 	u64 io_addr = 0;
1283 
1284 	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1285 				   state->npages, io_addr);
1286 	if (IS_ERR(fmr))
1287 		return PTR_ERR(fmr);
1288 
1289 	*state->next_fmr++ = fmr;
1290 	state->nmdesc++;
1291 
1292 	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
1293 
1294 	return 0;
1295 }
1296 
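/*
 * Register the pages gathered in @state by posting a fast registration work
 * request and add a memory descriptor for the new rkey.
 */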
1297 static int srp_map_finish_fr(struct srp_map_state *state,
1298 			     struct srp_rdma_ch *ch)
1299 {
1300 	struct srp_target_port *target = ch->target;
1301 	struct srp_device *dev = target->srp_host->srp_dev;
1302 	struct ib_send_wr *bad_wr;
1303 	struct ib_send_wr wr;
1304 	struct srp_fr_desc *desc;
1305 	u32 rkey;
1306 
1307 	desc = srp_fr_pool_get(ch->fr_pool);
1308 	if (!desc)
1309 		return -ENOMEM;
1310 
1311 	rkey = ib_inc_rkey(desc->mr->rkey);
1312 	ib_update_fast_reg_key(desc->mr, rkey);
1313 
1314 	memcpy(desc->frpl->page_list, state->pages,
1315 	       sizeof(state->pages[0]) * state->npages);
1316 
1317 	memset(&wr, 0, sizeof(wr));
1318 	wr.opcode = IB_WR_FAST_REG_MR;
1319 	wr.wr_id = FAST_REG_WR_ID_MASK;
1320 	wr.wr.fast_reg.iova_start = state->base_dma_addr;
1321 	wr.wr.fast_reg.page_list = desc->frpl;
1322 	wr.wr.fast_reg.page_list_len = state->npages;
1323 	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1324 	wr.wr.fast_reg.length = state->dma_len;
1325 	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1326 				       IB_ACCESS_REMOTE_READ |
1327 				       IB_ACCESS_REMOTE_WRITE);
1328 	wr.wr.fast_reg.rkey = desc->mr->lkey;
1329 
1330 	*state->next_fr++ = desc;
1331 	state->nmdesc++;
1332 
1333 	srp_map_desc(state, state->base_dma_addr, state->dma_len,
1334 		     desc->mr->rkey);
1335 
1336 	return ib_post_send(ch->qp, &wr, &bad_wr);
1337 }
1338 
1339 static int srp_finish_mapping(struct srp_map_state *state,
1340 			      struct srp_rdma_ch *ch)
1341 {
1342 	struct srp_target_port *target = ch->target;
1343 	int ret = 0;
1344 
1345 	if (state->npages == 0)
1346 		return 0;
1347 
1348 	if (state->npages == 1 && !register_always)
1349 		srp_map_desc(state, state->base_dma_addr, state->dma_len,
1350 			     target->rkey);
1351 	else
1352 		ret = target->srp_host->srp_dev->use_fast_reg ?
1353 			srp_map_finish_fr(state, ch) :
1354 			srp_map_finish_fmr(state, ch);
1355 
1356 	if (ret == 0) {
1357 		state->npages = 0;
1358 		state->dma_len = 0;
1359 	}
1360 
1361 	return ret;
1362 }
1363 
1364 static void srp_map_update_start(struct srp_map_state *state,
1365 				 struct scatterlist *sg, int sg_index,
1366 				 dma_addr_t dma_addr)
1367 {
1368 	state->unmapped_sg = sg;
1369 	state->unmapped_index = sg_index;
1370 	state->unmapped_addr = dma_addr;
1371 }
1372 
1373 static int srp_map_sg_entry(struct srp_map_state *state,
1374 			    struct srp_rdma_ch *ch,
1375 			    struct scatterlist *sg, int sg_index,
1376 			    bool use_mr)
1377 {
1378 	struct srp_target_port *target = ch->target;
1379 	struct srp_device *dev = target->srp_host->srp_dev;
1380 	struct ib_device *ibdev = dev->dev;
1381 	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1382 	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1383 	unsigned int len;
1384 	int ret;
1385 
1386 	if (!dma_len)
1387 		return 0;
1388 
1389 	if (!use_mr) {
1390 		/*
1391 		 * Once we're in direct map mode for a request, we don't
1392 		 * go back to FMR or FR mode, so no need to update anything
1393 		 * other than the descriptor.
1394 		 */
1395 		srp_map_desc(state, dma_addr, dma_len, target->rkey);
1396 		return 0;
1397 	}
1398 
1399 	/*
1400 	 * Since not all RDMA HW drivers support non-zero page offsets for
1401 	 * FMR, if we start at an offset into a page, don't merge into the
1402 	 * current FMR mapping. Finish it out, and use the kernel's MR for
1403 	 * this sg entry.
1404 	 */
1405 	if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1406 	    dma_len > dev->mr_max_size) {
1407 		ret = srp_finish_mapping(state, ch);
1408 		if (ret)
1409 			return ret;
1410 
1411 		srp_map_desc(state, dma_addr, dma_len, target->rkey);
1412 		srp_map_update_start(state, NULL, 0, 0);
1413 		return 0;
1414 	}
1415 
1416 	/*
1417 	 * If this is the first sg that will be mapped via FMR or via FR, save
1418 	 * our position. We need to know the first unmapped entry, its index,
1419 	 * and the first unmapped address within that entry to be able to
1420 	 * restart mapping after an error.
1421 	 */
1422 	if (!state->unmapped_sg)
1423 		srp_map_update_start(state, sg, sg_index, dma_addr);
1424 
1425 	while (dma_len) {
1426 		unsigned offset = dma_addr & ~dev->mr_page_mask;
1427 		if (state->npages == dev->max_pages_per_mr || offset != 0) {
1428 			ret = srp_finish_mapping(state, ch);
1429 			if (ret)
1430 				return ret;
1431 
1432 			srp_map_update_start(state, sg, sg_index, dma_addr);
1433 		}
1434 
1435 		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1436 
1437 		if (!state->npages)
1438 			state->base_dma_addr = dma_addr;
1439 		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1440 		state->dma_len += len;
1441 		dma_addr += len;
1442 		dma_len -= len;
1443 	}
1444 
1445 	/*
1446 	 * If the last entry of the MR wasn't a full page, then we need to
1447 	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
1449 	 */
1450 	ret = 0;
1451 	if (len != dev->mr_page_size) {
1452 		ret = srp_finish_mapping(state, ch);
1453 		if (!ret)
1454 			srp_map_update_start(state, NULL, 0, 0);
1455 	}
1456 	return ret;
1457 }
1458 
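/*
 * Map a scatterlist for a SCSI command, building memory descriptors in
 * req->indirect_desc. If memory registration fails, mapping restarts from
 * the first unmapped entry and continues without registration.
 */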
1459 static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1460 		      struct srp_request *req, struct scatterlist *scat,
1461 		      int count)
1462 {
1463 	struct srp_target_port *target = ch->target;
1464 	struct srp_device *dev = target->srp_host->srp_dev;
1465 	struct ib_device *ibdev = dev->dev;
1466 	struct scatterlist *sg;
1467 	int i;
1468 	bool use_mr;
1469 
1470 	state->desc	= req->indirect_desc;
1471 	state->pages	= req->map_page;
1472 	if (dev->use_fast_reg) {
1473 		state->next_fr = req->fr_list;
1474 		use_mr = !!ch->fr_pool;
1475 	} else {
1476 		state->next_fmr = req->fmr_list;
1477 		use_mr = !!ch->fmr_pool;
1478 	}
1479 
1480 	for_each_sg(scat, sg, count, i) {
1481 		if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
1482 			/*
1483 			 * Memory registration failed, so backtrack to the
1484 			 * first unmapped entry and continue on without using
1485 			 * memory registration.
1486 			 */
1487 			dma_addr_t dma_addr;
1488 			unsigned int dma_len;
1489 
1490 backtrack:
1491 			sg = state->unmapped_sg;
1492 			i = state->unmapped_index;
1493 
1494 			dma_addr = ib_sg_dma_address(ibdev, sg);
1495 			dma_len = ib_sg_dma_len(ibdev, sg);
1496 			dma_len -= (state->unmapped_addr - dma_addr);
1497 			dma_addr = state->unmapped_addr;
1498 			use_mr = false;
1499 			srp_map_desc(state, dma_addr, dma_len, target->rkey);
1500 		}
1501 	}
1502 
1503 	if (use_mr && srp_finish_mapping(state, ch))
1504 		goto backtrack;
1505 
1506 	req->nmdesc = state->nmdesc;
1507 
1508 	return 0;
1509 }
1510 
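/*
 * Map the data buffer of a SCSI command and fill in the SRP_CMD data
 * descriptors. Returns the length of the resulting SRP_CMD IU on success or
 * a negative error code on failure.
 */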
1511 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1512 			struct srp_request *req)
1513 {
1514 	struct srp_target_port *target = ch->target;
1515 	struct scatterlist *scat;
1516 	struct srp_cmd *cmd = req->cmd->buf;
1517 	int len, nents, count;
1518 	struct srp_device *dev;
1519 	struct ib_device *ibdev;
1520 	struct srp_map_state state;
1521 	struct srp_indirect_buf *indirect_hdr;
1522 	u32 table_len;
1523 	u8 fmt;
1524 
1525 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1526 		return sizeof (struct srp_cmd);
1527 
1528 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1529 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
1530 		shost_printk(KERN_WARNING, target->scsi_host,
1531 			     PFX "Unhandled data direction %d\n",
1532 			     scmnd->sc_data_direction);
1533 		return -EINVAL;
1534 	}
1535 
1536 	nents = scsi_sg_count(scmnd);
1537 	scat  = scsi_sglist(scmnd);
1538 
1539 	dev = target->srp_host->srp_dev;
1540 	ibdev = dev->dev;
1541 
1542 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1543 	if (unlikely(count == 0))
1544 		return -EIO;
1545 
1546 	fmt = SRP_DATA_DESC_DIRECT;
1547 	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
1548 
1549 	if (count == 1 && !register_always) {
1550 		/*
1551 		 * The midlayer only generated a single gather/scatter
1552 		 * entry, or DMA mapping coalesced everything to a
1553 		 * single entry.  So a direct descriptor along with
1554 		 * the DMA MR suffices.
1555 		 */
1556 		struct srp_direct_buf *buf = (void *) cmd->add_data;
1557 
1558 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1559 		buf->key = cpu_to_be32(target->rkey);
1560 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1561 
1562 		req->nmdesc = 0;
1563 		goto map_complete;
1564 	}
1565 
1566 	/*
1567 	 * We have more than one scatter/gather entry, so build our indirect
1568 	 * descriptor table, trying to merge as many entries as we can.
1569 	 */
1570 	indirect_hdr = (void *) cmd->add_data;
1571 
1572 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1573 				   target->indirect_size, DMA_TO_DEVICE);
1574 
1575 	memset(&state, 0, sizeof(state));
1576 	srp_map_sg(&state, ch, req, scat, count);
1577 
1578 	/* We've mapped the request, now pull as much of the indirect
1579 	 * descriptor table as we can into the command buffer. If this
1580 	 * target is not using an external indirect table, we are
1581 	 * guaranteed to fit into the command, as the SCSI layer won't
1582 	 * give us more S/G entries than we allow.
1583 	 */
1584 	if (state.ndesc == 1) {
1585 		/*
1586 		 * Memory registration collapsed the sg-list into one entry,
1587 		 * so use a direct descriptor.
1588 		 */
1589 		struct srp_direct_buf *buf = (void *) cmd->add_data;
1590 
1591 		*buf = req->indirect_desc[0];
1592 		goto map_complete;
1593 	}
1594 
1595 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1596 						!target->allow_ext_sg)) {
1597 		shost_printk(KERN_ERR, target->scsi_host,
1598 			     "Could not fit S/G list into SRP_CMD\n");
1599 		return -EIO;
1600 	}
1601 
1602 	count = min(state.ndesc, target->cmd_sg_cnt);
1603 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1604 
1605 	fmt = SRP_DATA_DESC_INDIRECT;
1606 	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1607 	len += count * sizeof (struct srp_direct_buf);
1608 
1609 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1610 	       count * sizeof (struct srp_direct_buf));
1611 
1612 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1613 	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1614 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1615 	indirect_hdr->len = cpu_to_be32(state.total_len);
1616 
1617 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1618 		cmd->data_out_desc_cnt = count;
1619 	else
1620 		cmd->data_in_desc_cnt = count;
1621 
1622 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1623 				      DMA_TO_DEVICE);
1624 
1625 map_complete:
1626 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1627 		cmd->buf_fmt = fmt << 4;
1628 	else
1629 		cmd->buf_fmt = fmt;
1630 
1631 	return len;
1632 }
1633 
1634 /*
1635  * Return an IU and possible credit to the free pool
1636  */
1637 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1638 			  enum srp_iu_type iu_type)
1639 {
1640 	unsigned long flags;
1641 
1642 	spin_lock_irqsave(&ch->lock, flags);
1643 	list_add(&iu->list, &ch->free_tx);
1644 	if (iu_type != SRP_IU_RSP)
1645 		++ch->req_lim;
1646 	spin_unlock_irqrestore(&ch->lock, flags);
1647 }
1648 
1649 /*
1650  * Must be called with ch->lock held to protect req_lim and free_tx.
1651  * If IU is not sent, it must be returned using srp_put_tx_iu().
1652  *
1653  * Note:
1654  * An upper limit for the number of allocated information units for each
1655  * request type is:
1656  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1657  *   more than Scsi_Host.can_queue requests.
1658  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1659  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1660  *   one unanswered SRP request to an initiator.
1661  */
1662 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1663 				      enum srp_iu_type iu_type)
1664 {
1665 	struct srp_target_port *target = ch->target;
1666 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1667 	struct srp_iu *iu;
1668 
1669 	srp_send_completion(ch->send_cq, ch);
1670 
1671 	if (list_empty(&ch->free_tx))
1672 		return NULL;
1673 
1674 	/* Initiator responses to target requests do not consume credits */
1675 	if (iu_type != SRP_IU_RSP) {
1676 		if (ch->req_lim <= rsv) {
1677 			++target->zero_req_lim;
1678 			return NULL;
1679 		}
1680 
1681 		--ch->req_lim;
1682 	}
1683 
1684 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1685 	list_del(&iu->list);
1686 	return iu;
1687 }
1688 
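/*
 * Post a signaled send work request for @iu with a payload of @len bytes.
 */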
1689 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1690 {
1691 	struct srp_target_port *target = ch->target;
1692 	struct ib_sge list;
1693 	struct ib_send_wr wr, *bad_wr;
1694 
1695 	list.addr   = iu->dma;
1696 	list.length = len;
1697 	list.lkey   = target->lkey;
1698 
1699 	wr.next       = NULL;
1700 	wr.wr_id      = (uintptr_t) iu;
1701 	wr.sg_list    = &list;
1702 	wr.num_sge    = 1;
1703 	wr.opcode     = IB_WR_SEND;
1704 	wr.send_flags = IB_SEND_SIGNALED;
1705 
1706 	return ib_post_send(ch->qp, &wr, &bad_wr);
1707 }
1708 
1709 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1710 {
1711 	struct srp_target_port *target = ch->target;
1712 	struct ib_recv_wr wr, *bad_wr;
1713 	struct ib_sge list;
1714 
1715 	list.addr   = iu->dma;
1716 	list.length = iu->size;
1717 	list.lkey   = target->lkey;
1718 
1719 	wr.next     = NULL;
1720 	wr.wr_id    = (uintptr_t) iu;
1721 	wr.sg_list  = &list;
1722 	wr.num_sge  = 1;
1723 
1724 	return ib_post_recv(ch->qp, &wr, &bad_wr);
1725 }
1726 
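/*
 * Process an SRP_RSP information unit: either complete a task management
 * request or complete the SCSI command identified by the response tag.
 */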
1727 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1728 {
1729 	struct srp_target_port *target = ch->target;
1730 	struct srp_request *req;
1731 	struct scsi_cmnd *scmnd;
1732 	unsigned long flags;
1733 
1734 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1735 		spin_lock_irqsave(&ch->lock, flags);
1736 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1737 		spin_unlock_irqrestore(&ch->lock, flags);
1738 
1739 		ch->tsk_mgmt_status = -1;
1740 		if (be32_to_cpu(rsp->resp_data_len) >= 4)
1741 			ch->tsk_mgmt_status = rsp->data[3];
1742 		complete(&ch->tsk_mgmt_done);
1743 	} else {
1744 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1745 		if (scmnd) {
1746 			req = (void *)scmnd->host_scribble;
1747 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1748 		}
1749 		if (!scmnd) {
1750 			shost_printk(KERN_ERR, target->scsi_host,
1751 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1752 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1753 
1754 			spin_lock_irqsave(&ch->lock, flags);
1755 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1756 			spin_unlock_irqrestore(&ch->lock, flags);
1757 
1758 			return;
1759 		}
1760 		scmnd->result = rsp->status;
1761 
1762 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1763 			memcpy(scmnd->sense_buffer, rsp->data +
1764 			       be32_to_cpu(rsp->resp_data_len),
1765 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1766 				     SCSI_SENSE_BUFFERSIZE));
1767 		}
1768 
1769 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1770 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1771 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1772 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1773 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1774 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1775 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1776 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1777 
1778 		srp_free_req(ch, req, scmnd,
1779 			     be32_to_cpu(rsp->req_lim_delta));
1780 
1781 		scmnd->host_scribble = NULL;
1782 		scmnd->scsi_done(scmnd);
1783 	}
1784 }
1785 
1786 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1787 			       void *rsp, int len)
1788 {
1789 	struct srp_target_port *target = ch->target;
1790 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1791 	unsigned long flags;
1792 	struct srp_iu *iu;
1793 	int err;
1794 
1795 	spin_lock_irqsave(&ch->lock, flags);
1796 	ch->req_lim += req_delta;
1797 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1798 	spin_unlock_irqrestore(&ch->lock, flags);
1799 
1800 	if (!iu) {
1801 		shost_printk(KERN_ERR, target->scsi_host, PFX
1802 			     "no IU available to send response\n");
1803 		return 1;
1804 	}
1805 
1806 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1807 	memcpy(iu->buf, rsp, len);
1808 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1809 
1810 	err = srp_post_send(ch, iu, len);
1811 	if (err) {
1812 		shost_printk(KERN_ERR, target->scsi_host, PFX
1813 			     "unable to post response: %d\n", err);
1814 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1815 	}
1816 
1817 	return err;
1818 }
1819 
1820 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1821 				 struct srp_cred_req *req)
1822 {
1823 	struct srp_cred_rsp rsp = {
1824 		.opcode = SRP_CRED_RSP,
1825 		.tag = req->tag,
1826 	};
1827 	s32 delta = be32_to_cpu(req->req_lim_delta);
1828 
1829 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1830 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1831 			     "problems processing SRP_CRED_REQ\n");
1832 }
1833 
1834 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1835 				struct srp_aer_req *req)
1836 {
1837 	struct srp_target_port *target = ch->target;
1838 	struct srp_aer_rsp rsp = {
1839 		.opcode = SRP_AER_RSP,
1840 		.tag = req->tag,
1841 	};
1842 	s32 delta = be32_to_cpu(req->req_lim_delta);
1843 
1844 	shost_printk(KERN_ERR, target->scsi_host, PFX
1845 		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1846 
1847 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1848 		shost_printk(KERN_ERR, target->scsi_host, PFX
1849 			     "problems processing SRP_AER_REQ\n");
1850 }
1851 
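/**
 * srp_handle_recv() - process a successful receive completion
 * @ch: RDMA channel the completion occurred on.
 * @wc: Receive work completion.
 *
 * Dispatches the received information unit based on its opcode and reposts
 * the receive buffer afterwards.
 */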
1852 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1853 {
1854 	struct srp_target_port *target = ch->target;
1855 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1856 	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1857 	int res;
1858 	u8 opcode;
1859 
1860 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1861 				   DMA_FROM_DEVICE);
1862 
1863 	opcode = *(u8 *) iu->buf;
1864 
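	/*
	 * Compile-time disabled debug code: change "if (0)" into "if (1)" to
	 * dump every received information unit to the kernel log.
	 */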
1865 	if (0) {
1866 		shost_printk(KERN_ERR, target->scsi_host,
1867 			     PFX "recv completion, opcode 0x%02x\n", opcode);
1868 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1869 			       iu->buf, wc->byte_len, true);
1870 	}
1871 
1872 	switch (opcode) {
1873 	case SRP_RSP:
1874 		srp_process_rsp(ch, iu->buf);
1875 		break;
1876 
1877 	case SRP_CRED_REQ:
1878 		srp_process_cred_req(ch, iu->buf);
1879 		break;
1880 
1881 	case SRP_AER_REQ:
1882 		srp_process_aer_req(ch, iu->buf);
1883 		break;
1884 
1885 	case SRP_T_LOGOUT:
1886 		/* XXX Handle target logout */
1887 		shost_printk(KERN_WARNING, target->scsi_host,
1888 			     PFX "Got target logout request\n");
1889 		break;
1890 
1891 	default:
1892 		shost_printk(KERN_WARNING, target->scsi_host,
1893 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1894 		break;
1895 	}
1896 
1897 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1898 				      DMA_FROM_DEVICE);
1899 
1900 	res = srp_post_recv(ch, iu);
1901 	if (res != 0)
1902 		shost_printk(KERN_ERR, target->scsi_host,
1903 			     PFX "Recv failed with error code %d\n", res);
1904 }
1905 
1906 /**
1907  * srp_tl_err_work() - handle a transport layer error
1908  * @work: Work structure embedded in an SRP target port.
1909  *
1910  * Note: This function may get invoked before the rport has been created,
1911  * hence the target->rport test.
1912  */
1913 static void srp_tl_err_work(struct work_struct *work)
1914 {
1915 	struct srp_target_port *target;
1916 
1917 	target = container_of(work, struct srp_target_port, tl_err_work);
1918 	if (target->rport)
1919 		srp_start_tl_fail_timers(target->rport);
1920 }
1921 
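/**
 * srp_handle_qp_err() - handle an error completion
 * @wr_id:     ID of the work request that failed.
 * @wc_status: IB work completion status.
 * @send_err:  True for a send completion and false for a receive completion.
 * @ch:        RDMA channel the error occurred on.
 *
 * A completion with wr_id == SRP_LAST_WR_ID only signals that draining the
 * QP has finished. Other error completions mark the QP as being in the error
 * state; the first one on a connected target is also logged and schedules
 * the transport layer error handler.
 */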
1922 static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1923 			      bool send_err, struct srp_rdma_ch *ch)
1924 {
1925 	struct srp_target_port *target = ch->target;
1926 
1927 	if (wr_id == SRP_LAST_WR_ID) {
1928 		complete(&ch->done);
1929 		return;
1930 	}
1931 
1932 	if (target->connected && !target->qp_in_error) {
1933 		if (wr_id & LOCAL_INV_WR_ID_MASK) {
1934 			shost_printk(KERN_ERR, target->scsi_host, PFX
1935 				     "LOCAL_INV failed with status %d\n",
1936 				     wc_status);
1937 		} else if (wr_id & FAST_REG_WR_ID_MASK) {
1938 			shost_printk(KERN_ERR, target->scsi_host, PFX
1939 				     "FAST_REG_MR failed status %d\n",
1940 				     wc_status);
1941 		} else {
1942 			shost_printk(KERN_ERR, target->scsi_host,
1943 				     PFX "failed %s status %d for iu %p\n",
1944 				     send_err ? "send" : "receive",
1945 				     wc_status, (void *)(uintptr_t)wr_id);
1946 		}
1947 		queue_work(system_long_wq, &target->tl_err_work);
1948 	}
1949 	target->qp_in_error = true;
1950 }
1951 
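/**
 * srp_recv_completion() - receive completion queue callback
 * @cq:     Receive completion queue.
 * @ch_ptr: RDMA channel that owns the completion queue.
 */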
1952 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1953 {
1954 	struct srp_rdma_ch *ch = ch_ptr;
1955 	struct ib_wc wc;
1956 
1957 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1958 	while (ib_poll_cq(cq, 1, &wc) > 0) {
1959 		if (likely(wc.status == IB_WC_SUCCESS)) {
1960 			srp_handle_recv(ch, &wc);
1961 		} else {
1962 			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1963 		}
1964 	}
1965 }
1966 
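/**
 * srp_send_completion() - send completion queue callback
 * @cq:     Send completion queue.
 * @ch_ptr: RDMA channel that owns the completion queue.
 *
 * Information units whose send completed successfully are returned to the
 * free_tx list.
 */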
1967 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1968 {
1969 	struct srp_rdma_ch *ch = ch_ptr;
1970 	struct ib_wc wc;
1971 	struct srp_iu *iu;
1972 
1973 	while (ib_poll_cq(cq, 1, &wc) > 0) {
1974 		if (likely(wc.status == IB_WC_SUCCESS)) {
1975 			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1976 			list_add(&iu->list, &ch->free_tx);
1977 		} else {
1978 			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1979 		}
1980 	}
1981 }
1982 
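/**
 * srp_queuecommand() - queue a SCSI command for transmission over SRP
 * @shost: SCSI host the command has been received on.
 * @scmnd: SCSI command to queue.
 */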
1983 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1984 {
1985 	struct srp_target_port *target = host_to_target(shost);
1986 	struct srp_rport *rport = target->rport;
1987 	struct srp_rdma_ch *ch;
1988 	struct srp_request *req;
1989 	struct srp_iu *iu;
1990 	struct srp_cmd *cmd;
1991 	struct ib_device *dev;
1992 	unsigned long flags;
1993 	u32 tag;
1994 	u16 idx;
1995 	int len, ret;
1996 	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1997 
1998 	/*
1999 	 * The SCSI EH thread is the only context from which srp_queuecommand()
2000 	 * can get invoked for blocked devices (SDEV_BLOCK /
2001 	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2002 	 * locking the rport mutex if invoked from inside the SCSI EH.
2003 	 */
2004 	if (in_scsi_eh)
2005 		mutex_lock(&rport->mutex);
2006 
2007 	scmnd->result = srp_chkready(target->rport);
2008 	if (unlikely(scmnd->result))
2009 		goto err;
2010 
2011 	WARN_ON_ONCE(scmnd->request->tag < 0);
2012 	tag = blk_mq_unique_tag(scmnd->request);
2013 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2014 	idx = blk_mq_unique_tag_to_tag(tag);
2015 	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2016 		  dev_name(&shost->shost_gendev), tag, idx,
2017 		  target->req_ring_size);
2018 
2019 	spin_lock_irqsave(&ch->lock, flags);
2020 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2021 	spin_unlock_irqrestore(&ch->lock, flags);
2022 
2023 	if (!iu)
2024 		goto err;
2025 
2026 	req = &ch->req_ring[idx];
2027 	dev = target->srp_host->srp_dev->dev;
2028 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2029 				   DMA_TO_DEVICE);
2030 
2031 	scmnd->host_scribble = (void *) req;
2032 
2033 	cmd = iu->buf;
2034 	memset(cmd, 0, sizeof *cmd);
2035 
2036 	cmd->opcode = SRP_CMD;
2037 	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
2038 	cmd->tag    = tag;
2039 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2040 
2041 	req->scmnd    = scmnd;
2042 	req->cmd      = iu;
2043 
2044 	len = srp_map_data(scmnd, ch, req);
2045 	if (len < 0) {
2046 		shost_printk(KERN_ERR, target->scsi_host,
2047 			     PFX "Failed to map data (%d)\n", len);
2048 		/*
2049 		 * If we ran out of memory descriptors (-ENOMEM) because an
2050 		 * application is queuing many requests with more than
2051 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2052 		 * to reduce queue depth temporarily.
2053 		 */
2054 		scmnd->result = len == -ENOMEM ?
2055 			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2056 		goto err_iu;
2057 	}
2058 
2059 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2060 				      DMA_TO_DEVICE);
2061 
2062 	if (srp_post_send(ch, iu, len)) {
2063 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2064 		goto err_unmap;
2065 	}
2066 
2067 	ret = 0;
2068 
2069 unlock_rport:
2070 	if (in_scsi_eh)
2071 		mutex_unlock(&rport->mutex);
2072 
2073 	return ret;
2074 
2075 err_unmap:
2076 	srp_unmap_data(scmnd, ch, req);
2077 
2078 err_iu:
2079 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2080 
	/*
	 * Clear the request so that the loops that iterate over the request
	 * ring cannot encounter a dangling SCSI command pointer.
	 */
2085 	req->scmnd = NULL;
2086 
2087 err:
2088 	if (scmnd->result) {
2089 		scmnd->scsi_done(scmnd);
2090 		ret = 0;
2091 	} else {
2092 		ret = SCSI_MLQUEUE_HOST_BUSY;
2093 	}
2094 
2095 	goto unlock_rport;
2096 }
2097 
2098 /*
2099  * Note: the resources allocated in this function are freed in
2100  * srp_free_ch_ib().
2101  */
2102 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2103 {
2104 	struct srp_target_port *target = ch->target;
2105 	int i;
2106 
2107 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2108 			      GFP_KERNEL);
2109 	if (!ch->rx_ring)
2110 		goto err_no_ring;
2111 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2112 			      GFP_KERNEL);
2113 	if (!ch->tx_ring)
2114 		goto err_no_ring;
2115 
2116 	for (i = 0; i < target->queue_size; ++i) {
2117 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2118 					      ch->max_ti_iu_len,
2119 					      GFP_KERNEL, DMA_FROM_DEVICE);
2120 		if (!ch->rx_ring[i])
2121 			goto err;
2122 	}
2123 
2124 	for (i = 0; i < target->queue_size; ++i) {
2125 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2126 					      target->max_iu_len,
2127 					      GFP_KERNEL, DMA_TO_DEVICE);
2128 		if (!ch->tx_ring[i])
2129 			goto err;
2130 
2131 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2132 	}
2133 
2134 	return 0;
2135 
2136 err:
2137 	for (i = 0; i < target->queue_size; ++i) {
2138 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2139 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
2144 	kfree(ch->tx_ring);
2145 	ch->tx_ring = NULL;
2146 	kfree(ch->rx_ring);
2147 	ch->rx_ring = NULL;
2148 
2149 	return -ENOMEM;
2150 }
2151 
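/**
 * srp_compute_rq_tmo() - compute the SCSI request queue timeout
 * @qp_attr:   QP attributes as returned by ib_cm_init_qp_attr().
 * @attr_mask: QP attribute mask returned together with @qp_attr.
 *
 * Returns a timeout in jiffies that is one second larger than the longest
 * time it can take before the HCA generates an error completion, derived
 * from the QP Local ACK Timeout and retry count.
 */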
2152 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2153 {
2154 	uint64_t T_tr_ns, max_compl_time_ms;
2155 	uint32_t rq_tmo_jiffies;
2156 
2157 	/*
2158 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2159 	 * table 91), both the QP timeout and the retry count have to be set
2160 	 * for RC QP's during the RTR to RTS transition.
2161 	 */
2162 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2163 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2164 
2165 	/*
2166 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2167 	 * it can take before an error completion is generated. See also
2168 	 * C9-140..142 in the IBTA spec for more information about how to
2169 	 * convert the QP Local ACK Timeout value to nanoseconds.
2170 	 */
2171 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2172 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2173 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2174 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
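
	/*
	 * Worked example (illustrative values only): with a Local ACK Timeout
	 * of 19 and a retry count of 7, T_tr_ns = 4096 * 2^19 ns ~= 2.1 s, so
	 * the largest completion time is about 7 * 4 * 2.1 s ~= 60 s and
	 * rq_tmo_jiffies corresponds to roughly 61 seconds.
	 */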
2175 
2176 	return rq_tmo_jiffies;
2177 }
2178 
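/**
 * srp_cm_rep_handler() - process an IB CM REP message
 * @cm_id: IB CM connection identifier.
 * @lrsp:  SRP login response carried in the REP private data.
 * @ch:    RDMA channel the REP has been received for.
 *
 * Transitions the QP to RTR and RTS, posts the receive buffers and sends an
 * RTU. The result is stored in ch->status.
 */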
2179 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2180 			       struct srp_login_rsp *lrsp,
2181 			       struct srp_rdma_ch *ch)
2182 {
2183 	struct srp_target_port *target = ch->target;
2184 	struct ib_qp_attr *qp_attr = NULL;
2185 	int attr_mask = 0;
2186 	int ret;
2187 	int i;
2188 
2189 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2190 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2191 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2192 
2193 		/*
2194 		 * Reserve credits for task management so we don't
2195 		 * bounce requests back to the SCSI mid-layer.
2196 		 */
2197 		target->scsi_host->can_queue
2198 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2199 			      target->scsi_host->can_queue);
2200 		target->scsi_host->cmd_per_lun
2201 			= min_t(int, target->scsi_host->can_queue,
2202 				target->scsi_host->cmd_per_lun);
2203 	} else {
2204 		shost_printk(KERN_WARNING, target->scsi_host,
2205 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2206 		ret = -ECONNRESET;
2207 		goto error;
2208 	}
2209 
2210 	if (!ch->rx_ring) {
2211 		ret = srp_alloc_iu_bufs(ch);
2212 		if (ret)
2213 			goto error;
2214 	}
2215 
2216 	ret = -ENOMEM;
2217 	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2218 	if (!qp_attr)
2219 		goto error;
2220 
2221 	qp_attr->qp_state = IB_QPS_RTR;
2222 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2223 	if (ret)
2224 		goto error_free;
2225 
2226 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2227 	if (ret)
2228 		goto error_free;
2229 
2230 	for (i = 0; i < target->queue_size; i++) {
2231 		struct srp_iu *iu = ch->rx_ring[i];
2232 
2233 		ret = srp_post_recv(ch, iu);
2234 		if (ret)
2235 			goto error_free;
2236 	}
2237 
2238 	qp_attr->qp_state = IB_QPS_RTS;
2239 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2240 	if (ret)
2241 		goto error_free;
2242 
2243 	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2244 
2245 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2246 	if (ret)
2247 		goto error_free;
2248 
2249 	ret = ib_send_cm_rtu(cm_id, NULL, 0);
2250 
2251 error_free:
2252 	kfree(qp_attr);
2253 
2254 error:
2255 	ch->status = ret;
2256 }
2257 
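/**
 * srp_cm_rej_handler() - process an IB CM REJ message
 * @cm_id: IB CM connection identifier.
 * @event: IB CM event that carries the rejection reason.
 * @ch:    RDMA channel the REJ has been received for.
 *
 * Translates the rejection reason into a ch->status value. Redirects and
 * stale connections are reported as SRP_DLID_REDIRECT, SRP_PORT_REDIRECT or
 * SRP_STALE_CONN so that the caller can retry the login; all other reasons
 * result in -ECONNRESET.
 */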
2258 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2259 			       struct ib_cm_event *event,
2260 			       struct srp_rdma_ch *ch)
2261 {
2262 	struct srp_target_port *target = ch->target;
2263 	struct Scsi_Host *shost = target->scsi_host;
2264 	struct ib_class_port_info *cpi;
2265 	int opcode;
2266 
2267 	switch (event->param.rej_rcvd.reason) {
2268 	case IB_CM_REJ_PORT_CM_REDIRECT:
2269 		cpi = event->param.rej_rcvd.ari;
2270 		ch->path.dlid = cpi->redirect_lid;
2271 		ch->path.pkey = cpi->redirect_pkey;
2272 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2273 		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2274 
2275 		ch->status = ch->path.dlid ?
2276 			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2277 		break;
2278 
2279 	case IB_CM_REJ_PORT_REDIRECT:
2280 		if (srp_target_is_topspin(target)) {
2281 			/*
2282 			 * Topspin/Cisco SRP gateways incorrectly send
2283 			 * reject reason code 25 when they mean 24
2284 			 * (port redirect).
2285 			 */
2286 			memcpy(ch->path.dgid.raw,
2287 			       event->param.rej_rcvd.ari, 16);
2288 
2289 			shost_printk(KERN_DEBUG, shost,
2290 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2291 				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2292 				     be64_to_cpu(ch->path.dgid.global.interface_id));
2293 
2294 			ch->status = SRP_PORT_REDIRECT;
2295 		} else {
2296 			shost_printk(KERN_WARNING, shost,
2297 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2298 			ch->status = -ECONNRESET;
2299 		}
2300 		break;
2301 
2302 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2303 		shost_printk(KERN_WARNING, shost,
2304 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2305 		ch->status = -ECONNRESET;
2306 		break;
2307 
2308 	case IB_CM_REJ_CONSUMER_DEFINED:
2309 		opcode = *(u8 *) event->private_data;
2310 		if (opcode == SRP_LOGIN_REJ) {
2311 			struct srp_login_rej *rej = event->private_data;
2312 			u32 reason = be32_to_cpu(rej->reason);
2313 
2314 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2315 				shost_printk(KERN_WARNING, shost,
2316 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2317 			else
2318 				shost_printk(KERN_WARNING, shost, PFX
2319 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2320 					     target->sgid.raw,
2321 					     target->orig_dgid.raw, reason);
2322 		} else
2323 			shost_printk(KERN_WARNING, shost,
2324 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2325 				     " opcode 0x%02x\n", opcode);
2326 		ch->status = -ECONNRESET;
2327 		break;
2328 
2329 	case IB_CM_REJ_STALE_CONN:
2330 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2331 		ch->status = SRP_STALE_CONN;
2332 		break;
2333 
2334 	default:
2335 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2336 			     event->param.rej_rcvd.reason);
2337 		ch->status = -ECONNRESET;
2338 	}
2339 }
2340 
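/**
 * srp_cm_handler() - IB connection manager event handler
 * @cm_id: IB CM connection identifier.
 * @event: IB CM event.
 */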
2341 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2342 {
2343 	struct srp_rdma_ch *ch = cm_id->context;
2344 	struct srp_target_port *target = ch->target;
2345 	int comp = 0;
2346 
2347 	switch (event->event) {
2348 	case IB_CM_REQ_ERROR:
2349 		shost_printk(KERN_DEBUG, target->scsi_host,
2350 			     PFX "Sending CM REQ failed\n");
2351 		comp = 1;
2352 		ch->status = -ECONNRESET;
2353 		break;
2354 
2355 	case IB_CM_REP_RECEIVED:
2356 		comp = 1;
2357 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2358 		break;
2359 
2360 	case IB_CM_REJ_RECEIVED:
2361 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2362 		comp = 1;
2363 
2364 		srp_cm_rej_handler(cm_id, event, ch);
2365 		break;
2366 
2367 	case IB_CM_DREQ_RECEIVED:
2368 		shost_printk(KERN_WARNING, target->scsi_host,
2369 			     PFX "DREQ received - connection closed\n");
2370 		srp_change_conn_state(target, false);
2371 		if (ib_send_cm_drep(cm_id, NULL, 0))
2372 			shost_printk(KERN_ERR, target->scsi_host,
2373 				     PFX "Sending CM DREP failed\n");
2374 		queue_work(system_long_wq, &target->tl_err_work);
2375 		break;
2376 
2377 	case IB_CM_TIMEWAIT_EXIT:
2378 		shost_printk(KERN_ERR, target->scsi_host,
2379 			     PFX "connection closed\n");
2380 		comp = 1;
2381 
2382 		ch->status = 0;
2383 		break;
2384 
2385 	case IB_CM_MRA_RECEIVED:
2386 	case IB_CM_DREQ_ERROR:
2387 	case IB_CM_DREP_RECEIVED:
2388 		break;
2389 
2390 	default:
2391 		shost_printk(KERN_WARNING, target->scsi_host,
2392 			     PFX "Unhandled CM event %d\n", event->event);
2393 		break;
2394 	}
2395 
2396 	if (comp)
2397 		complete(&ch->done);
2398 
2399 	return 0;
2400 }
2401 
/**
 * srp_change_queue_depth() - set the queue depth of a SCSI device
 * @sdev: SCSI device.
 * @qdepth: Requested queue depth.
 *
 * Returns the new queue depth.
 */
2409 static int
2410 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2411 {
2412 	if (!sdev->tagged_supported)
2413 		qdepth = 1;
2414 	return scsi_change_queue_depth(sdev, qdepth);
2415 }
2416 
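/**
 * srp_send_tsk_mgmt() - send an SRP task management request
 * @ch:      RDMA channel to send the request on.
 * @req_tag: Tag of the SCSI command the request refers to.
 * @lun:     SCSI logical unit number.
 * @func:    SRP task management function, e.g. SRP_TSK_ABORT_TASK.
 *
 * Returns 0 if a response has been received within SRP_ABORT_TIMEOUT_MS and
 * -1 otherwise.
 */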
2417 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2418 			     unsigned int lun, u8 func)
2419 {
2420 	struct srp_target_port *target = ch->target;
2421 	struct srp_rport *rport = target->rport;
2422 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2423 	struct srp_iu *iu;
2424 	struct srp_tsk_mgmt *tsk_mgmt;
2425 
2426 	if (!target->connected || target->qp_in_error)
2427 		return -1;
2428 
2429 	init_completion(&ch->tsk_mgmt_done);
2430 
	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
2435 	mutex_lock(&rport->mutex);
2436 	spin_lock_irq(&ch->lock);
2437 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2438 	spin_unlock_irq(&ch->lock);
2439 
2440 	if (!iu) {
2441 		mutex_unlock(&rport->mutex);
2442 
2443 		return -1;
2444 	}
2445 
2446 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2447 				   DMA_TO_DEVICE);
2448 	tsk_mgmt = iu->buf;
2449 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2450 
2451 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2452 	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
2453 	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
2454 	tsk_mgmt->tsk_mgmt_func = func;
2455 	tsk_mgmt->task_tag	= req_tag;
2456 
2457 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2458 				      DMA_TO_DEVICE);
2459 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2460 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2461 		mutex_unlock(&rport->mutex);
2462 
2463 		return -1;
2464 	}
2465 	mutex_unlock(&rport->mutex);
2466 
2467 	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2468 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2469 		return -1;
2470 
2471 	return 0;
2472 }
2473 
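/**
 * srp_abort() - SCSI error handler abort callback
 * @scmnd: SCSI command to abort.
 */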
2474 static int srp_abort(struct scsi_cmnd *scmnd)
2475 {
2476 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2477 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2478 	u32 tag;
2479 	u16 ch_idx;
2480 	struct srp_rdma_ch *ch;
2481 	int ret;
2482 
2483 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2484 
2485 	if (!req)
2486 		return SUCCESS;
2487 	tag = blk_mq_unique_tag(scmnd->request);
2488 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2489 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2490 		return SUCCESS;
2491 	ch = &target->ch[ch_idx];
2492 	if (!srp_claim_req(ch, req, NULL, scmnd))
2493 		return SUCCESS;
2494 	shost_printk(KERN_ERR, target->scsi_host,
2495 		     "Sending SRP abort for tag %#x\n", tag);
2496 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2497 			      SRP_TSK_ABORT_TASK) == 0)
2498 		ret = SUCCESS;
2499 	else if (target->rport->state == SRP_RPORT_LOST)
2500 		ret = FAST_IO_FAIL;
2501 	else
2502 		ret = FAILED;
2503 	srp_free_req(ch, req, scmnd, 0);
2504 	scmnd->result = DID_ABORT << 16;
2505 	scmnd->scsi_done(scmnd);
2506 
2507 	return ret;
2508 }
2509 
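/**
 * srp_reset_device() - SCSI error handler LUN reset callback
 * @scmnd: SCSI command that triggered the reset.
 *
 * Sends an SRP LUN RESET task management request on the first channel and,
 * if that succeeds, finishes all outstanding requests for the device with
 * DID_RESET on every channel.
 */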
2510 static int srp_reset_device(struct scsi_cmnd *scmnd)
2511 {
2512 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2513 	struct srp_rdma_ch *ch;
	int i, j;
2515 
2516 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2517 
2518 	ch = &target->ch[0];
2519 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2520 			      SRP_TSK_LUN_RESET))
2521 		return FAILED;
2522 	if (ch->tsk_mgmt_status)
2523 		return FAILED;
2524 
2525 	for (i = 0; i < target->ch_count; i++) {
2526 		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];
2529 
2530 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2531 		}
2532 	}
2533 
2534 	return SUCCESS;
2535 }
2536 
2537 static int srp_reset_host(struct scsi_cmnd *scmnd)
2538 {
2539 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2540 
2541 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2542 
2543 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2544 }
2545 
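/**
 * srp_slave_configure() - configure a newly discovered SCSI device
 * @sdev: SCSI device.
 *
 * For disk devices, set the block layer request timeout to the larger of
 * 30 seconds and target->rq_tmo_jiffies.
 */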
2546 static int srp_slave_configure(struct scsi_device *sdev)
2547 {
2548 	struct Scsi_Host *shost = sdev->host;
2549 	struct srp_target_port *target = host_to_target(shost);
2550 	struct request_queue *q = sdev->request_queue;
2551 	unsigned long timeout;
2552 
2553 	if (sdev->type == TYPE_DISK) {
2554 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2555 		blk_queue_rq_timeout(q, timeout);
2556 	}
2557 
2558 	return 0;
2559 }
2560 
2561 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2562 			   char *buf)
2563 {
2564 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2565 
2566 	return sprintf(buf, "0x%016llx\n",
2567 		       (unsigned long long) be64_to_cpu(target->id_ext));
2568 }
2569 
2570 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2571 			     char *buf)
2572 {
2573 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2574 
2575 	return sprintf(buf, "0x%016llx\n",
2576 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
2577 }
2578 
2579 static ssize_t show_service_id(struct device *dev,
2580 			       struct device_attribute *attr, char *buf)
2581 {
2582 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2583 
2584 	return sprintf(buf, "0x%016llx\n",
2585 		       (unsigned long long) be64_to_cpu(target->service_id));
2586 }
2587 
2588 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2589 			 char *buf)
2590 {
2591 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2592 
2593 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2594 }
2595 
2596 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2597 			 char *buf)
2598 {
2599 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2600 
2601 	return sprintf(buf, "%pI6\n", target->sgid.raw);
2602 }
2603 
2604 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2605 			 char *buf)
2606 {
2607 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2608 	struct srp_rdma_ch *ch = &target->ch[0];
2609 
2610 	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2611 }
2612 
2613 static ssize_t show_orig_dgid(struct device *dev,
2614 			      struct device_attribute *attr, char *buf)
2615 {
2616 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2617 
2618 	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2619 }
2620 
2621 static ssize_t show_req_lim(struct device *dev,
2622 			    struct device_attribute *attr, char *buf)
2623 {
2624 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2625 	struct srp_rdma_ch *ch;
2626 	int i, req_lim = INT_MAX;
2627 
2628 	for (i = 0; i < target->ch_count; i++) {
2629 		ch = &target->ch[i];
2630 		req_lim = min(req_lim, ch->req_lim);
2631 	}
2632 	return sprintf(buf, "%d\n", req_lim);
2633 }
2634 
2635 static ssize_t show_zero_req_lim(struct device *dev,
2636 				 struct device_attribute *attr, char *buf)
2637 {
2638 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2639 
2640 	return sprintf(buf, "%d\n", target->zero_req_lim);
2641 }
2642 
2643 static ssize_t show_local_ib_port(struct device *dev,
2644 				  struct device_attribute *attr, char *buf)
2645 {
2646 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2647 
2648 	return sprintf(buf, "%d\n", target->srp_host->port);
2649 }
2650 
2651 static ssize_t show_local_ib_device(struct device *dev,
2652 				    struct device_attribute *attr, char *buf)
2653 {
2654 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2655 
2656 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2657 }
2658 
2659 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2660 			     char *buf)
2661 {
2662 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2663 
2664 	return sprintf(buf, "%d\n", target->ch_count);
2665 }
2666 
2667 static ssize_t show_comp_vector(struct device *dev,
2668 				struct device_attribute *attr, char *buf)
2669 {
2670 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2671 
2672 	return sprintf(buf, "%d\n", target->comp_vector);
2673 }
2674 
2675 static ssize_t show_tl_retry_count(struct device *dev,
2676 				   struct device_attribute *attr, char *buf)
2677 {
2678 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2679 
2680 	return sprintf(buf, "%d\n", target->tl_retry_count);
2681 }
2682 
2683 static ssize_t show_cmd_sg_entries(struct device *dev,
2684 				   struct device_attribute *attr, char *buf)
2685 {
2686 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2687 
2688 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2689 }
2690 
2691 static ssize_t show_allow_ext_sg(struct device *dev,
2692 				 struct device_attribute *attr, char *buf)
2693 {
2694 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2695 
2696 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2697 }
2698 
2699 static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
2700 static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
2701 static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
2702 static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
2703 static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
2704 static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
2705 static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
2706 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2707 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
2708 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2709 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2710 static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
2711 static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
2712 static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
2713 static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2714 static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
2715 
2716 static struct device_attribute *srp_host_attrs[] = {
2717 	&dev_attr_id_ext,
2718 	&dev_attr_ioc_guid,
2719 	&dev_attr_service_id,
2720 	&dev_attr_pkey,
2721 	&dev_attr_sgid,
2722 	&dev_attr_dgid,
2723 	&dev_attr_orig_dgid,
2724 	&dev_attr_req_lim,
2725 	&dev_attr_zero_req_lim,
2726 	&dev_attr_local_ib_port,
2727 	&dev_attr_local_ib_device,
2728 	&dev_attr_ch_count,
2729 	&dev_attr_comp_vector,
2730 	&dev_attr_tl_retry_count,
2731 	&dev_attr_cmd_sg_entries,
2732 	&dev_attr_allow_ext_sg,
2733 	NULL
2734 };
2735 
2736 static struct scsi_host_template srp_template = {
2737 	.module				= THIS_MODULE,
2738 	.name				= "InfiniBand SRP initiator",
2739 	.proc_name			= DRV_NAME,
2740 	.slave_configure		= srp_slave_configure,
2741 	.info				= srp_target_info,
2742 	.queuecommand			= srp_queuecommand,
2743 	.change_queue_depth             = srp_change_queue_depth,
2744 	.eh_abort_handler		= srp_abort,
2745 	.eh_device_reset_handler	= srp_reset_device,
2746 	.eh_host_reset_handler		= srp_reset_host,
2747 	.skip_settle_delay		= true,
2748 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
2749 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
2750 	.this_id			= -1,
2751 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
2752 	.use_clustering			= ENABLE_CLUSTERING,
2753 	.shost_attrs			= srp_host_attrs,
2754 	.use_blk_tags			= 1,
2755 	.track_queue_depth		= 1,
2756 };
2757 
2758 static int srp_sdev_count(struct Scsi_Host *host)
2759 {
2760 	struct scsi_device *sdev;
2761 	int c = 0;
2762 
2763 	shost_for_each_device(sdev, host)
2764 		c++;
2765 
2766 	return c;
2767 }
2768 
2769 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2770 {
2771 	struct srp_rport_identifiers ids;
2772 	struct srp_rport *rport;
2773 
2774 	target->state = SRP_TARGET_SCANNING;
2775 	sprintf(target->target_name, "SRP.T10:%016llX",
2776 		 (unsigned long long) be64_to_cpu(target->id_ext));
2777 
2778 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2779 		return -ENODEV;
2780 
2781 	memcpy(ids.port_id, &target->id_ext, 8);
2782 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2783 	ids.roles = SRP_RPORT_ROLE_TARGET;
2784 	rport = srp_rport_add(target->scsi_host, &ids);
2785 	if (IS_ERR(rport)) {
2786 		scsi_remove_host(target->scsi_host);
2787 		return PTR_ERR(rport);
2788 	}
2789 
2790 	rport->lld_data = target;
2791 	target->rport = rport;
2792 
2793 	spin_lock(&host->target_lock);
2794 	list_add_tail(&target->list, &host->target_list);
2795 	spin_unlock(&host->target_lock);
2796 
2797 	scsi_scan_target(&target->scsi_host->shost_gendev,
2798 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
2799 
2800 	if (!target->connected || target->qp_in_error) {
2801 		shost_printk(KERN_INFO, target->scsi_host,
2802 			     PFX "SCSI scan failed - removing SCSI host\n");
2803 		srp_queue_remove_work(target);
2804 		goto out;
2805 	}
2806 
	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2808 		 dev_name(&target->scsi_host->shost_gendev),
2809 		 srp_sdev_count(target->scsi_host));
2810 
2811 	spin_lock_irq(&target->lock);
2812 	if (target->state == SRP_TARGET_SCANNING)
2813 		target->state = SRP_TARGET_LIVE;
2814 	spin_unlock_irq(&target->lock);
2815 
2816 out:
2817 	return 0;
2818 }
2819 
2820 static void srp_release_dev(struct device *dev)
2821 {
2822 	struct srp_host *host =
2823 		container_of(dev, struct srp_host, dev);
2824 
2825 	complete(&host->released);
2826 }
2827 
2828 static struct class srp_class = {
2829 	.name    = "infiniband_srp",
2830 	.dev_release = srp_release_dev
2831 };
2832 
2833 /**
2834  * srp_conn_unique() - check whether the connection to a target is unique
2835  * @host:   SRP host.
2836  * @target: SRP target port.
2837  */
2838 static bool srp_conn_unique(struct srp_host *host,
2839 			    struct srp_target_port *target)
2840 {
2841 	struct srp_target_port *t;
2842 	bool ret = false;
2843 
2844 	if (target->state == SRP_TARGET_REMOVED)
2845 		goto out;
2846 
2847 	ret = true;
2848 
2849 	spin_lock(&host->target_lock);
2850 	list_for_each_entry(t, &host->target_list, list) {
2851 		if (t != target &&
2852 		    target->id_ext == t->id_ext &&
2853 		    target->ioc_guid == t->ioc_guid &&
2854 		    target->initiator_ext == t->initiator_ext) {
2855 			ret = false;
2856 			break;
2857 		}
2858 	}
2859 	spin_unlock(&host->target_lock);
2860 
2861 out:
2862 	return ret;
2863 }
2864 
2865 /*
2866  * Target ports are added by writing
2867  *
2868  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2869  *     pkey=<P_Key>,service_id=<service ID>
2870  *
2871  * to the add_target sysfs attribute.
2872  */
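
/*
 * Example, written as a single shell command line (the identifiers below are
 * illustrative placeholders and the host device name depends on the local
 * HCA and port):
 *
 *     echo id_ext=200100000000000a,ioc_guid=200100000000000a,
 *          dgid=fe800000000000000002c90300a0a0a1,pkey=ffff,
 *          service_id=200100000000000a
 *          > /sys/class/infiniband_srp/srp-<hca>-<port>/add_target
 */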
2873 enum {
2874 	SRP_OPT_ERR		= 0,
2875 	SRP_OPT_ID_EXT		= 1 << 0,
2876 	SRP_OPT_IOC_GUID	= 1 << 1,
2877 	SRP_OPT_DGID		= 1 << 2,
2878 	SRP_OPT_PKEY		= 1 << 3,
2879 	SRP_OPT_SERVICE_ID	= 1 << 4,
2880 	SRP_OPT_MAX_SECT	= 1 << 5,
2881 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
2882 	SRP_OPT_IO_CLASS	= 1 << 7,
2883 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
2884 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
2885 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
2886 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
2887 	SRP_OPT_COMP_VECTOR	= 1 << 12,
2888 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
2889 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
2890 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
2891 				   SRP_OPT_IOC_GUID	|
2892 				   SRP_OPT_DGID		|
2893 				   SRP_OPT_PKEY		|
2894 				   SRP_OPT_SERVICE_ID),
2895 };
2896 
2897 static const match_table_t srp_opt_tokens = {
2898 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
2899 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
2900 	{ SRP_OPT_DGID,			"dgid=%s" 		},
2901 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
2902 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
2903 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
2904 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
2905 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
2906 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
2907 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
2908 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
2909 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
2910 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
2911 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
2912 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
2913 	{ SRP_OPT_ERR,			NULL 			}
2914 };
2915 
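/**
 * srp_parse_options() - parse the parameters of a target login request
 * @buf:    Parameter string written to the add_target sysfs attribute.
 * @target: SRP target port to fill in.
 *
 * Returns 0 if all mandatory parameters (id_ext, ioc_guid, dgid, pkey and
 * service_id) have been specified and are valid, and a negative error code
 * otherwise.
 */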
2916 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2917 {
2918 	char *options, *sep_opt;
2919 	char *p;
2920 	char dgid[3];
2921 	substring_t args[MAX_OPT_ARGS];
2922 	int opt_mask = 0;
2923 	int token;
2924 	int ret = -EINVAL;
2925 	int i;
2926 
2927 	options = kstrdup(buf, GFP_KERNEL);
2928 	if (!options)
2929 		return -ENOMEM;
2930 
2931 	sep_opt = options;
2932 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2933 		if (!*p)
2934 			continue;
2935 
2936 		token = match_token(p, srp_opt_tokens, args);
2937 		opt_mask |= token;
2938 
2939 		switch (token) {
2940 		case SRP_OPT_ID_EXT:
2941 			p = match_strdup(args);
2942 			if (!p) {
2943 				ret = -ENOMEM;
2944 				goto out;
2945 			}
2946 			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2947 			kfree(p);
2948 			break;
2949 
2950 		case SRP_OPT_IOC_GUID:
2951 			p = match_strdup(args);
2952 			if (!p) {
2953 				ret = -ENOMEM;
2954 				goto out;
2955 			}
2956 			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2957 			kfree(p);
2958 			break;
2959 
2960 		case SRP_OPT_DGID:
2961 			p = match_strdup(args);
2962 			if (!p) {
2963 				ret = -ENOMEM;
2964 				goto out;
2965 			}
2966 			if (strlen(p) != 32) {
2967 				pr_warn("bad dest GID parameter '%s'\n", p);
2968 				kfree(p);
2969 				goto out;
2970 			}
2971 
2972 			for (i = 0; i < 16; ++i) {
2973 				strlcpy(dgid, p + i * 2, sizeof(dgid));
2974 				if (sscanf(dgid, "%hhx",
2975 					   &target->orig_dgid.raw[i]) < 1) {
2976 					ret = -EINVAL;
2977 					kfree(p);
2978 					goto out;
2979 				}
2980 			}
2981 			kfree(p);
2982 			break;
2983 
2984 		case SRP_OPT_PKEY:
2985 			if (match_hex(args, &token)) {
2986 				pr_warn("bad P_Key parameter '%s'\n", p);
2987 				goto out;
2988 			}
2989 			target->pkey = cpu_to_be16(token);
2990 			break;
2991 
2992 		case SRP_OPT_SERVICE_ID:
2993 			p = match_strdup(args);
2994 			if (!p) {
2995 				ret = -ENOMEM;
2996 				goto out;
2997 			}
2998 			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2999 			kfree(p);
3000 			break;
3001 
3002 		case SRP_OPT_MAX_SECT:
3003 			if (match_int(args, &token)) {
3004 				pr_warn("bad max sect parameter '%s'\n", p);
3005 				goto out;
3006 			}
3007 			target->scsi_host->max_sectors = token;
3008 			break;
3009 
3010 		case SRP_OPT_QUEUE_SIZE:
3011 			if (match_int(args, &token) || token < 1) {
3012 				pr_warn("bad queue_size parameter '%s'\n", p);
3013 				goto out;
3014 			}
3015 			target->scsi_host->can_queue = token;
3016 			target->queue_size = token + SRP_RSP_SQ_SIZE +
3017 					     SRP_TSK_MGMT_SQ_SIZE;
3018 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3019 				target->scsi_host->cmd_per_lun = token;
3020 			break;
3021 
3022 		case SRP_OPT_MAX_CMD_PER_LUN:
3023 			if (match_int(args, &token) || token < 1) {
3024 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3025 					p);
3026 				goto out;
3027 			}
3028 			target->scsi_host->cmd_per_lun = token;
3029 			break;
3030 
3031 		case SRP_OPT_IO_CLASS:
3032 			if (match_hex(args, &token)) {
3033 				pr_warn("bad IO class parameter '%s'\n", p);
3034 				goto out;
3035 			}
3036 			if (token != SRP_REV10_IB_IO_CLASS &&
3037 			    token != SRP_REV16A_IB_IO_CLASS) {
3038 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3039 					token, SRP_REV10_IB_IO_CLASS,
3040 					SRP_REV16A_IB_IO_CLASS);
3041 				goto out;
3042 			}
3043 			target->io_class = token;
3044 			break;
3045 
3046 		case SRP_OPT_INITIATOR_EXT:
3047 			p = match_strdup(args);
3048 			if (!p) {
3049 				ret = -ENOMEM;
3050 				goto out;
3051 			}
3052 			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3053 			kfree(p);
3054 			break;
3055 
3056 		case SRP_OPT_CMD_SG_ENTRIES:
3057 			if (match_int(args, &token) || token < 1 || token > 255) {
3058 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3059 					p);
3060 				goto out;
3061 			}
3062 			target->cmd_sg_cnt = token;
3063 			break;
3064 
3065 		case SRP_OPT_ALLOW_EXT_SG:
3066 			if (match_int(args, &token)) {
3067 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3068 				goto out;
3069 			}
3070 			target->allow_ext_sg = !!token;
3071 			break;
3072 
3073 		case SRP_OPT_SG_TABLESIZE:
3074 			if (match_int(args, &token) || token < 1 ||
3075 					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3076 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3077 					p);
3078 				goto out;
3079 			}
3080 			target->sg_tablesize = token;
3081 			break;
3082 
3083 		case SRP_OPT_COMP_VECTOR:
3084 			if (match_int(args, &token) || token < 0) {
3085 				pr_warn("bad comp_vector parameter '%s'\n", p);
3086 				goto out;
3087 			}
3088 			target->comp_vector = token;
3089 			break;
3090 
3091 		case SRP_OPT_TL_RETRY_COUNT:
3092 			if (match_int(args, &token) || token < 2 || token > 7) {
3093 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3094 					p);
3095 				goto out;
3096 			}
3097 			target->tl_retry_count = token;
3098 			break;
3099 
3100 		default:
3101 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3102 				p);
3103 			goto out;
3104 		}
3105 	}
3106 
3107 	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3108 		ret = 0;
3109 	else
3110 		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3111 			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3112 			    !(srp_opt_tokens[i].token & opt_mask))
3113 				pr_warn("target creation request is missing parameter '%s'\n",
3114 					srp_opt_tokens[i].pattern);
3115 
3116 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3117 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3118 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3119 			target->scsi_host->cmd_per_lun,
3120 			target->scsi_host->can_queue);
3121 
3122 out:
3123 	kfree(options);
3124 	return ret;
3125 }
3126 
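/**
 * srp_create_target() - handle a write to the add_target sysfs attribute
 * @dev:   SRP host device.
 * @attr:  The add_target device attribute.
 * @buf:   Login parameters.
 * @count: Size of @buf in bytes.
 *
 * Allocates a SCSI host, parses the login parameters, creates and connects
 * one or more RDMA channels spread over the online NUMA nodes and completion
 * vectors, and finally registers the SCSI host. Returns @count upon success
 * and a negative error code upon failure.
 */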
3127 static ssize_t srp_create_target(struct device *dev,
3128 				 struct device_attribute *attr,
3129 				 const char *buf, size_t count)
3130 {
3131 	struct srp_host *host =
3132 		container_of(dev, struct srp_host, dev);
3133 	struct Scsi_Host *target_host;
3134 	struct srp_target_port *target;
3135 	struct srp_rdma_ch *ch;
3136 	struct srp_device *srp_dev = host->srp_dev;
3137 	struct ib_device *ibdev = srp_dev->dev;
3138 	int ret, node_idx, node, cpu, i;
3139 	bool multich = false;
3140 
3141 	target_host = scsi_host_alloc(&srp_template,
3142 				      sizeof (struct srp_target_port));
3143 	if (!target_host)
3144 		return -ENOMEM;
3145 
3146 	target_host->transportt  = ib_srp_transport_template;
3147 	target_host->max_channel = 0;
3148 	target_host->max_id      = 1;
3149 	target_host->max_lun     = SRP_MAX_LUN;
3150 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3151 
3152 	target = host_to_target(target_host);
3153 
3154 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3155 	target->scsi_host	= target_host;
3156 	target->srp_host	= host;
3157 	target->lkey		= host->srp_dev->mr->lkey;
3158 	target->rkey		= host->srp_dev->mr->rkey;
3159 	target->cmd_sg_cnt	= cmd_sg_entries;
3160 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3161 	target->allow_ext_sg	= allow_ext_sg;
3162 	target->tl_retry_count	= 7;
3163 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3164 
	/*
	 * Prevent the SCSI host from being removed by srp_remove_target()
	 * before this function returns.
	 */
3169 	scsi_host_get(target->scsi_host);
3170 
3171 	mutex_lock(&host->add_target_mutex);
3172 
3173 	ret = srp_parse_options(buf, target);
3174 	if (ret)
3175 		goto err;
3176 
3177 	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3178 	if (ret)
3179 		goto err;
3180 
3181 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3182 
3183 	if (!srp_conn_unique(target->srp_host, target)) {
3184 		shost_printk(KERN_INFO, target->scsi_host,
3185 			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3186 			     be64_to_cpu(target->id_ext),
3187 			     be64_to_cpu(target->ioc_guid),
3188 			     be64_to_cpu(target->initiator_ext));
3189 		ret = -EEXIST;
3190 		goto err;
3191 	}
3192 
3193 	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3194 	    target->cmd_sg_cnt < target->sg_tablesize) {
3195 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3196 		target->sg_tablesize = target->cmd_sg_cnt;
3197 	}
3198 
3199 	target_host->sg_tablesize = target->sg_tablesize;
3200 	target->indirect_size = target->sg_tablesize *
3201 				sizeof (struct srp_direct_buf);
3202 	target->max_iu_len = sizeof (struct srp_cmd) +
3203 			     sizeof (struct srp_indirect_buf) +
3204 			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3205 
3206 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3207 	INIT_WORK(&target->remove_work, srp_remove_work);
3208 	spin_lock_init(&target->lock);
3209 	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3210 	if (ret)
3211 		goto err;
3212 
3213 	ret = -ENOMEM;
3214 	target->ch_count = max_t(unsigned, num_online_nodes(),
3215 				 min(ch_count ? :
3216 				     min(4 * num_online_nodes(),
3217 					 ibdev->num_comp_vectors),
3218 				     num_online_cpus()));
3219 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3220 			     GFP_KERNEL);
3221 	if (!target->ch)
3222 		goto err;
3223 
3224 	node_idx = 0;
3225 	for_each_online_node(node) {
3226 		const int ch_start = (node_idx * target->ch_count /
3227 				      num_online_nodes());
3228 		const int ch_end = ((node_idx + 1) * target->ch_count /
3229 				    num_online_nodes());
3230 		const int cv_start = (node_idx * ibdev->num_comp_vectors /
3231 				      num_online_nodes() + target->comp_vector)
3232 				     % ibdev->num_comp_vectors;
3233 		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3234 				    num_online_nodes() + target->comp_vector)
3235 				   % ibdev->num_comp_vectors;
3236 		int cpu_idx = 0;
3237 
3238 		for_each_online_cpu(cpu) {
3239 			if (cpu_to_node(cpu) != node)
3240 				continue;
3241 			if (ch_start + cpu_idx >= ch_end)
3242 				continue;
3243 			ch = &target->ch[ch_start + cpu_idx];
3244 			ch->target = target;
3245 			ch->comp_vector = cv_start == cv_end ? cv_start :
3246 				cv_start + cpu_idx % (cv_end - cv_start);
3247 			spin_lock_init(&ch->lock);
3248 			INIT_LIST_HEAD(&ch->free_tx);
3249 			ret = srp_new_cm_id(ch);
3250 			if (ret)
3251 				goto err_disconnect;
3252 
3253 			ret = srp_create_ch_ib(ch);
3254 			if (ret)
3255 				goto err_disconnect;
3256 
3257 			ret = srp_alloc_req_data(ch);
3258 			if (ret)
3259 				goto err_disconnect;
3260 
3261 			ret = srp_connect_ch(ch, multich);
3262 			if (ret) {
3263 				shost_printk(KERN_ERR, target->scsi_host,
3264 					     PFX "Connection %d/%d failed\n",
3265 					     ch_start + cpu_idx,
3266 					     target->ch_count);
3267 				if (node_idx == 0 && cpu_idx == 0) {
3268 					goto err_disconnect;
3269 				} else {
3270 					srp_free_ch_ib(target, ch);
3271 					srp_free_req_data(target, ch);
3272 					target->ch_count = ch - target->ch;
3273 					break;
3274 				}
3275 			}
3276 
3277 			multich = true;
3278 			cpu_idx++;
3279 		}
3280 		node_idx++;
3281 	}
3282 
3283 	target->scsi_host->nr_hw_queues = target->ch_count;
3284 
3285 	ret = srp_add_target(host, target);
3286 	if (ret)
3287 		goto err_disconnect;
3288 
3289 	if (target->state != SRP_TARGET_REMOVED) {
3290 		shost_printk(KERN_DEBUG, target->scsi_host, PFX
3291 			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3292 			     be64_to_cpu(target->id_ext),
3293 			     be64_to_cpu(target->ioc_guid),
3294 			     be16_to_cpu(target->pkey),
3295 			     be64_to_cpu(target->service_id),
3296 			     target->sgid.raw, target->orig_dgid.raw);
3297 	}
3298 
3299 	ret = count;
3300 
3301 out:
3302 	mutex_unlock(&host->add_target_mutex);
3303 
3304 	scsi_host_put(target->scsi_host);
3305 
3306 	return ret;
3307 
3308 err_disconnect:
3309 	srp_disconnect_target(target);
3310 
3311 	for (i = 0; i < target->ch_count; i++) {
3312 		ch = &target->ch[i];
3313 		srp_free_ch_ib(target, ch);
3314 		srp_free_req_data(target, ch);
3315 	}
3316 
3317 	kfree(target->ch);
3318 
3319 err:
3320 	scsi_host_put(target_host);
3321 	goto out;
3322 }
3323 
3324 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3325 
3326 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3327 			  char *buf)
3328 {
3329 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3330 
3331 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3332 }
3333 
3334 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3335 
3336 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3337 			 char *buf)
3338 {
3339 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3340 
3341 	return sprintf(buf, "%d\n", host->port);
3342 }
3343 
3344 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3345 
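/**
 * srp_add_port() - create an SRP host for one port of an IB device
 * @device: SRP device.
 * @port:   Port number.
 *
 * Registers a device in the infiniband_srp class with add_target, ibdev and
 * port attributes. Returns the new SRP host or NULL upon failure.
 */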
3346 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3347 {
3348 	struct srp_host *host;
3349 
3350 	host = kzalloc(sizeof *host, GFP_KERNEL);
3351 	if (!host)
3352 		return NULL;
3353 
3354 	INIT_LIST_HEAD(&host->target_list);
3355 	spin_lock_init(&host->target_lock);
3356 	init_completion(&host->released);
3357 	mutex_init(&host->add_target_mutex);
3358 	host->srp_dev = device;
3359 	host->port = port;
3360 
3361 	host->dev.class = &srp_class;
3362 	host->dev.parent = device->dev->dma_device;
3363 	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3364 
3365 	if (device_register(&host->dev))
3366 		goto free_host;
3367 	if (device_create_file(&host->dev, &dev_attr_add_target))
3368 		goto err_class;
3369 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3370 		goto err_class;
3371 	if (device_create_file(&host->dev, &dev_attr_port))
3372 		goto err_class;
3373 
3374 	return host;
3375 
3376 err_class:
3377 	device_unregister(&host->dev);
3378 
3379 free_host:
3380 	kfree(host);
3381 
3382 	return NULL;
3383 }
3384 
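/**
 * srp_add_one() - IB client callback invoked for each new IB device
 * @device: Newly registered IB device.
 *
 * Queries the device capabilities, allocates a protection domain and a DMA
 * memory region and adds one SRP host per physical port.
 */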
3385 static void srp_add_one(struct ib_device *device)
3386 {
3387 	struct srp_device *srp_dev;
3388 	struct ib_device_attr *dev_attr;
3389 	struct srp_host *host;
3390 	int mr_page_shift, s, e, p;
3391 	u64 max_pages_per_mr;
3392 
3393 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3394 	if (!dev_attr)
3395 		return;
3396 
3397 	if (ib_query_device(device, dev_attr)) {
3398 		pr_warn("Query device failed for %s\n", device->name);
3399 		goto free_attr;
3400 	}
3401 
3402 	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3403 	if (!srp_dev)
3404 		goto free_attr;
3405 
3406 	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3407 			    device->map_phys_fmr && device->unmap_fmr);
3408 	srp_dev->has_fr = (dev_attr->device_cap_flags &
3409 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
3410 	if (!srp_dev->has_fmr && !srp_dev->has_fr)
3411 		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3412 
3413 	srp_dev->use_fast_reg = (srp_dev->has_fr &&
3414 				 (!srp_dev->has_fmr || prefer_fr));
3415 
3416 	/*
3417 	 * Use the smallest page size supported by the HCA, down to a
3418 	 * minimum of 4096 bytes. We're unlikely to build large sglists
3419 	 * out of smaller entries.
3420 	 */
3421 	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
3422 	srp_dev->mr_page_size	= 1 << mr_page_shift;
3423 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
3424 	max_pages_per_mr	= dev_attr->max_mr_size;
3425 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
3426 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3427 					  max_pages_per_mr);
3428 	if (srp_dev->use_fast_reg) {
3429 		srp_dev->max_pages_per_mr =
3430 			min_t(u32, srp_dev->max_pages_per_mr,
3431 			      dev_attr->max_fast_reg_page_list_len);
3432 	}
3433 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
3434 				   srp_dev->max_pages_per_mr;
3435 	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3436 		 device->name, mr_page_shift, dev_attr->max_mr_size,
3437 		 dev_attr->max_fast_reg_page_list_len,
3438 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3439 
3440 	INIT_LIST_HEAD(&srp_dev->dev_list);
3441 
3442 	srp_dev->dev = device;
3443 	srp_dev->pd  = ib_alloc_pd(device);
3444 	if (IS_ERR(srp_dev->pd))
3445 		goto free_dev;
3446 
3447 	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3448 				    IB_ACCESS_LOCAL_WRITE |
3449 				    IB_ACCESS_REMOTE_READ |
3450 				    IB_ACCESS_REMOTE_WRITE);
3451 	if (IS_ERR(srp_dev->mr))
3452 		goto err_pd;
3453 
3454 	if (device->node_type == RDMA_NODE_IB_SWITCH) {
3455 		s = 0;
3456 		e = 0;
3457 	} else {
3458 		s = 1;
3459 		e = device->phys_port_cnt;
3460 	}
3461 
3462 	for (p = s; p <= e; ++p) {
3463 		host = srp_add_port(srp_dev, p);
3464 		if (host)
3465 			list_add_tail(&host->list, &srp_dev->dev_list);
3466 	}
3467 
3468 	ib_set_client_data(device, &srp_client, srp_dev);
3469 
3470 	goto free_attr;
3471 
3472 err_pd:
3473 	ib_dealloc_pd(srp_dev->pd);
3474 
3475 free_dev:
3476 	kfree(srp_dev);
3477 
3478 free_attr:
3479 	kfree(dev_attr);
3480 }
3481 
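/**
 * srp_remove_one() - IB client callback invoked when an IB device is removed
 * @device: IB device that is being removed.
 *
 * Unregisters all SRP hosts associated with the device, removes their target
 * ports and releases the memory region and protection domain.
 */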
3482 static void srp_remove_one(struct ib_device *device)
3483 {
3484 	struct srp_device *srp_dev;
3485 	struct srp_host *host, *tmp_host;
3486 	struct srp_target_port *target;
3487 
3488 	srp_dev = ib_get_client_data(device, &srp_client);
3489 	if (!srp_dev)
3490 		return;
3491 
3492 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3493 		device_unregister(&host->dev);
3494 		/*
3495 		 * Wait for the sysfs entry to go away, so that no new
3496 		 * target ports can be created.
3497 		 */
3498 		wait_for_completion(&host->released);
3499 
3500 		/*
3501 		 * Remove all target ports.
3502 		 */
3503 		spin_lock(&host->target_lock);
3504 		list_for_each_entry(target, &host->target_list, list)
3505 			srp_queue_remove_work(target);
3506 		spin_unlock(&host->target_lock);
3507 
3508 		/*
3509 		 * Wait for tl_err and target port removal tasks.
3510 		 */
3511 		flush_workqueue(system_long_wq);
3512 		flush_workqueue(srp_remove_wq);
3513 
3514 		kfree(host);
3515 	}
3516 
3517 	ib_dereg_mr(srp_dev->mr);
3518 	ib_dealloc_pd(srp_dev->pd);
3519 
3520 	kfree(srp_dev);
3521 }
3522 
3523 static struct srp_function_template ib_srp_transport_functions = {
3524 	.has_rport_state	 = true,
3525 	.reset_timer_if_blocked	 = true,
3526 	.reconnect_delay	 = &srp_reconnect_delay,
3527 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
3528 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
3529 	.reconnect		 = srp_rport_reconnect,
3530 	.rport_delete		 = srp_rport_delete,
3531 	.terminate_rport_io	 = srp_terminate_io,
3532 };
3533 
3534 static int __init srp_init_module(void)
3535 {
3536 	int ret;
3537 
3538 	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3539 
3540 	if (srp_sg_tablesize) {
3541 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3542 		if (!cmd_sg_entries)
3543 			cmd_sg_entries = srp_sg_tablesize;
3544 	}
3545 
3546 	if (!cmd_sg_entries)
3547 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3548 
3549 	if (cmd_sg_entries > 255) {
3550 		pr_warn("Clamping cmd_sg_entries to 255\n");
3551 		cmd_sg_entries = 255;
3552 	}
3553 
3554 	if (!indirect_sg_entries)
3555 		indirect_sg_entries = cmd_sg_entries;
3556 	else if (indirect_sg_entries < cmd_sg_entries) {
3557 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3558 			cmd_sg_entries);
3559 		indirect_sg_entries = cmd_sg_entries;
3560 	}
3561 
3562 	srp_remove_wq = create_workqueue("srp_remove");
3563 	if (!srp_remove_wq) {
3564 		ret = -ENOMEM;
3565 		goto out;
3566 	}
3567 
3568 	ret = -ENOMEM;
3569 	ib_srp_transport_template =
3570 		srp_attach_transport(&ib_srp_transport_functions);
3571 	if (!ib_srp_transport_template)
3572 		goto destroy_wq;
3573 
3574 	ret = class_register(&srp_class);
3575 	if (ret) {
3576 		pr_err("couldn't register class infiniband_srp\n");
3577 		goto release_tr;
3578 	}
3579 
3580 	ib_sa_register_client(&srp_sa_client);
3581 
3582 	ret = ib_register_client(&srp_client);
3583 	if (ret) {
3584 		pr_err("couldn't register IB client\n");
3585 		goto unreg_sa;
3586 	}
3587 
3588 out:
3589 	return ret;
3590 
3591 unreg_sa:
3592 	ib_sa_unregister_client(&srp_sa_client);
3593 	class_unregister(&srp_class);
3594 
3595 release_tr:
3596 	srp_release_transport(ib_srp_transport_template);
3597 
3598 destroy_wq:
3599 	destroy_workqueue(srp_remove_wq);
3600 	goto out;
3601 }
3602 
3603 static void __exit srp_cleanup_module(void)
3604 {
3605 	ib_unregister_client(&srp_client);
3606 	ib_sa_unregister_client(&srp_sa_client);
3607 	class_unregister(&srp_class);
3608 	srp_release_transport(ib_srp_transport_template);
3609 	destroy_workqueue(srp_remove_wq);
3610 }
3611 
3612 module_init(srp_init_module);
3613 module_exit(srp_cleanup_module);
3614