/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12, max 255)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static int mellanox_workarounds = 1;

module_param(mellanox_workarounds, int, 0444);
MODULE_PARM_DESC(mellanox_workarounds,
		 "Enable workarounds for Mellanox SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static int srp_target_is_mellanox(struct srp_target_port *target)
{
	static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };

	return mellanox_workarounds &&
		!memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->srp_dev->dev,
				  srp_completion, NULL, target, SRP_CQ_SIZE, 0);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr  = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = target->cq;
	init_attr->recv_cq          = target->cq;

	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID |
						   IB_SA_PATH_REC_DGID |
						   IB_SA_PATH_REC_SGID |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path      = &target->path;
	req->param.alternate_path    = NULL;
	req->param.service_id        = target->service_id;
	req->param.qp_num            = target->qp->qp_num;
	req->param.qp_type           = target->qp->qp_type;
	req->param.private_data      = &req->priv;
	req->param.private_data_len  = sizeof req->priv;
	req->param.flow_control      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = 7;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd),
			scsi_sg_count(scmnd), scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req);
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_qp_attr qp_attr;
	struct srp_request *req, *tmp;
	struct ib_wc wc;
	int ret;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto err;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	spin_lock_irq(target->scsi_host->host_lock);
	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		srp_reset_req(target, req);
	spin_unlock_irq(target->scsi_host->host_lock);

	target->rx_head	= 0;
	target->tx_head	= 0;
	target->tx_tail	= 0;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;

	if (!dev->fmr_pool)
		return -ENODEV;

	if (srp_target_is_mellanox(target) &&
	    (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
		return -EINVAL;

	len = page_cnt = 0;
	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		for (j = 0; j < dma_len; j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(ib_sg_dma_address(ibdev, sg) &
				 dev->fmr_page_mask) + j;
	}

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
			       ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

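/*
 * srp_map_data() below chooses one of three SRP data descriptor layouts.
 * If DMA mapping leaves a single scatterlist entry, a direct descriptor
 * referencing the device's global DMA MR is enough.  Otherwise
 * srp_map_fmr() above tries to collapse the pages into one FMR mapping so
 * that a single direct descriptor still suffices; if that fails, an
 * indirect descriptor table with one entry per mapped scatterlist element
 * is built instead.
 */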
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;
	struct srp_device *dev;
	struct ib_device *ibdev;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(dev->mr->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
	} else if (srp_map_fmr(target, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		struct scatterlist *sg;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		scsi_for_each_sg(scmnd, sg, count, i) {
			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

			buf->desc_list[i].va  =
				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
			buf->desc_list[i].key =
				cpu_to_be32(dev->mr->rkey);
			buf->desc_list[i].len = cpu_to_be32(dma_len);
			datalen += dma_len;
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev;
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

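	/*
	 * The "if (0)" block below is a debugging aid: it is compiled out,
	 * but when enabled it dumps the received IU as a hex byte stream.
	 */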
	if (0) {
		int i;

		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR " [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);
}

static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %d\n",
				     wc.wr_id & SRP_OP_RECV ? "receive" : "send",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->srp_dev->mr->lkey;

	wr.next     = NULL;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_request_type req_type)
{
	s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;

	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (target->req_lim < min) {
		++target->zero_req_lim;
		return NULL;
	}

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

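/*
 * Request-limit (credit) accounting: __srp_post_send() consumes one credit
 * per IU posted (--req_lim), and credits are returned through the
 * req_lim_delta field of each SRP_RSP in srp_process_rsp().  Normal
 * commands require at least two credits in __srp_get_tx_iu() so that one
 * credit is always held in reserve for a task management request.
 */
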
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->srp_dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
	if (!iu)
		goto err;

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
				   DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n");
		goto err_unmap;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
				      DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

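/*
 * Connection management: srp_send_req() sends the CM REQ carrying the
 * SRP_LOGIN_REQ, and srp_cm_handler() below drives the rest of the
 * exchange (REP, RTR/RTS transitions, RTU).  REJs are decoded by
 * srp_cm_rej_handler(), which translates redirect and stale-connection
 * rejects into the SRP_PORT_REDIRECT, SRP_DLID_REDIRECT and
 * SRP_STALE_CONN statuses that srp_connect_target() retries on.
 */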
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			shost_printk(KERN_WARNING, target->scsi_host,
				     PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		if (!target->rx_ring[0]) {
			target->status = srp_alloc_iu_bufs(target);
			if (target->status)
				break;
		}

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag		= req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}

static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT |
				   SRP_OPT_IOC_GUID |
				   SRP_OPT_DGID |
				   SRP_OPT_PKEY |
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class  = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host  = host;

	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			srp_remove_host(target->scsi_host);
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
};

static int __init srp_init_module(void)
{
	int ret;

	if (srp_sg_tablesize > 255) {
		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
		srp_sg_tablesize = 255;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	srp_template.sg_tablesize = srp_sg_tablesize;
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);
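
/*
 * Illustrative example only (device name, port number and target values
 * below are placeholders): using the option format documented above
 * srp_parse_options(), a target port is typically added through the
 * per-port add_target attribute created by srp_add_port(), e.g.
 *
 *     echo "id_ext=200100a0b8020402,ioc_guid=00a0b8020402,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=200100a0b8020402" > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 *
 * srp_create_target() parses the string, establishes the CM connection
 * and registers the resulting SCSI host with the SRP transport class.
 */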