/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static int mellanox_workarounds = 1;

module_param(mellanox_workarounds, int, 0444);
MODULE_PARM_DESC(mellanox_workarounds,
		 "Enable workarounds for Mellanox SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *)
		host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static int srp_target_is_mellanox(struct srp_target_port *target)
{
	static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };

	return mellanox_workarounds &&
		!memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
	if (ib_dma_mapping_error(host->dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE, 0);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr  = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = target->cq;
	init_attr->recv_cq          = target->cq;

	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path     = &target->path;
	req->param.alternate_path   = NULL;
	req->param.service_id       = target->service_id;
	req->param.qp_num           = target->qp->qp_num;
	req->param.qp_type          = target->qp->qp_type;
	req->param.private_data     = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control     = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = 7;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode        = SRP_LOGIN_REQ;
	req->priv.tag           = 0;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
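	 * For example, a rev. 16a target port ID is the target's ID
	 * extension followed by its IOC GUID, while rev. 10 targets
	 * expect the IOC GUID first, as the two branches below show.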
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
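		 * Either way, we simply retry the login from the top
		 * of this loop after handling the redirect.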
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	ib_dma_unmap_sg(target->srp_host->dev->dev, scsi_sglist(scmnd),
			scsi_sg_count(scmnd), scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req);
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req, *tmp;
	struct ib_wc wc;
	int ret;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	spin_lock_irq(target->scsi_host->host_lock);
	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		srp_reset_req(target, req);
	spin_unlock_irq(target->scsi_host->host_lock);

	target->rx_head = 0;
	target->tx_head = 0;
	target->tx_tail = 0;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
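	 * Instead, mark the target dead and schedule srp_remove_work()
	 * to do the removal later from process context.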
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;
	struct srp_device *dev = target->srp_host->dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;

	if (!dev->fmr_pool)
		return -ENODEV;

	if (srp_target_is_mellanox(target) &&
	    (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
		return -EINVAL;

	len = page_cnt = 0;
	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		for (j = 0; j < dma_len; j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(ib_sg_dma_address(ibdev, sg) &
				 dev->fmr_page_mask) + j;
	}

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
			       ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;
	struct srp_device *dev;
	struct ib_device *ibdev;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
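		 * The descriptor just points at the single mapped
		 * segment, using the device's global DMA MR rkey.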
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(dev->mr->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
	} else if (srp_map_fmr(target, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry. Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		struct scatterlist *sg;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		scsi_for_each_sg(scmnd, sg, count, i) {
			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

			buf->desc_list[i].va  =
				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
			buf->desc_list[i].key =
				cpu_to_be32(dev->mr->rkey);
			buf->desc_list[i].len = cpu_to_be32(dma_len);
			datalen += dma_len;
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd) {
			/* Don't dereference a request with no command attached. */
			printk(KERN_ERR PFX "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev;
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dev = target->srp_host->dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

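		/* Debug-only dump of the received IU, eight bytes per line. */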
printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode); 835 836 for (i = 0; i < wc->byte_len; ++i) { 837 if (i % 8 == 0) 838 printk(KERN_ERR " [%02x] ", i); 839 printk(" %02x", ((u8 *) iu->buf)[i]); 840 if ((i + 1) % 8 == 0) 841 printk("\n"); 842 } 843 844 if (wc->byte_len % 8) 845 printk("\n"); 846 } 847 848 switch (opcode) { 849 case SRP_RSP: 850 srp_process_rsp(target, iu->buf); 851 break; 852 853 case SRP_T_LOGOUT: 854 /* XXX Handle target logout */ 855 printk(KERN_WARNING PFX "Got target logout request\n"); 856 break; 857 858 default: 859 printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode); 860 break; 861 } 862 863 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, 864 DMA_FROM_DEVICE); 865 } 866 867 static void srp_completion(struct ib_cq *cq, void *target_ptr) 868 { 869 struct srp_target_port *target = target_ptr; 870 struct ib_wc wc; 871 872 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 873 while (ib_poll_cq(cq, 1, &wc) > 0) { 874 if (wc.status) { 875 printk(KERN_ERR PFX "failed %s status %d\n", 876 wc.wr_id & SRP_OP_RECV ? "receive" : "send", 877 wc.status); 878 target->qp_in_error = 1; 879 break; 880 } 881 882 if (wc.wr_id & SRP_OP_RECV) 883 srp_handle_recv(target, &wc); 884 else 885 ++target->tx_tail; 886 } 887 } 888 889 static int __srp_post_recv(struct srp_target_port *target) 890 { 891 struct srp_iu *iu; 892 struct ib_sge list; 893 struct ib_recv_wr wr, *bad_wr; 894 unsigned int next; 895 int ret; 896 897 next = target->rx_head & (SRP_RQ_SIZE - 1); 898 wr.wr_id = next | SRP_OP_RECV; 899 iu = target->rx_ring[next]; 900 901 list.addr = iu->dma; 902 list.length = iu->size; 903 list.lkey = target->srp_host->dev->mr->lkey; 904 905 wr.next = NULL; 906 wr.sg_list = &list; 907 wr.num_sge = 1; 908 909 ret = ib_post_recv(target->qp, &wr, &bad_wr); 910 if (!ret) 911 ++target->rx_head; 912 913 return ret; 914 } 915 916 static int srp_post_recv(struct srp_target_port *target) 917 { 918 unsigned long flags; 919 int ret; 920 921 spin_lock_irqsave(target->scsi_host->host_lock, flags); 922 ret = __srp_post_recv(target); 923 spin_unlock_irqrestore(target->scsi_host->host_lock, flags); 924 925 return ret; 926 } 927 928 /* 929 * Must be called with target->scsi_host->host_lock held to protect 930 * req_lim and tx_head. Lock cannot be dropped between call here and 931 * call to __srp_post_send(). 932 */ 933 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target) 934 { 935 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) 936 return NULL; 937 938 if (unlikely(target->req_lim < 1)) 939 ++target->zero_req_lim; 940 941 return target->tx_ring[target->tx_head & SRP_SQ_SIZE]; 942 } 943 944 /* 945 * Must be called with target->scsi_host->host_lock held to protect 946 * req_lim and tx_head. 
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dev = target->srp_host->dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
				   DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
				      DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct
		ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING " REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		if (!target->rx_ring[0]) {
			target->status = srp_alloc_iu_bufs(target);
			if (target->status)
				break;
		}

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;
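		/*
		 * Transition the QP to RTR, post an initial receive, then
		 * move it to RTS and send the RTU to finish connection setup.
		 */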
		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			printk(KERN_ERR PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode        = SRP_TSK_MGMT;
	tsk_mgmt->lun           = cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag           = req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag      = req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}

static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	printk(KERN_ERR "SRP abort called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target,
			       req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	printk(KERN_ERR "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *)
				     target->path.dgid.raw)[7]));
}

static ssize_t show_orig_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(target->orig_dgid[0]),
		       be16_to_cpu(target->orig_dgid[1]),
		       be16_to_cpu(target->orig_dgid[2]),
		       be16_to_cpu(target->orig_dgid[3]),
		       be16_to_cpu(target->orig_dgid[4]),
		       be16_to_cpu(target->orig_dgid[5]),
		       be16_to_cpu(target->orig_dgid[6]),
		       be16_to_cpu(target->orig_dgid[7]));
}

static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	return sprintf(buf, "%s\n", target->srp_host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(id_ext,	  S_IRUGO, show_id_ext,		 NULL);
static CLASS_DEVICE_ATTR(ioc_guid,	  S_IRUGO, show_ioc_guid,	 NULL);
static CLASS_DEVICE_ATTR(service_id,	  S_IRUGO, show_service_id,	 NULL);
static CLASS_DEVICE_ATTR(pkey,		  S_IRUGO, show_pkey,		 NULL);
static CLASS_DEVICE_ATTR(dgid,		  S_IRUGO, show_dgid,		 NULL);
static CLASS_DEVICE_ATTR(orig_dgid,	  S_IRUGO, show_orig_dgid,	 NULL);
static CLASS_DEVICE_ATTR(zero_req_lim,	  S_IRUGO, show_zero_req_lim,	 NULL);
static CLASS_DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,	 NULL);
static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	&class_device_attr_orig_dgid,
	&class_device_attr_zero_req_lim,
	&class_device_attr_local_ib_port,
	&class_device_attr_local_ib_device,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id,
&target->id_ext, 8); 1561 memcpy(ids.port_id + 8, &target->ioc_guid, 8); 1562 ids.roles = SRP_RPORT_ROLE_TARGET; 1563 rport = srp_rport_add(target->scsi_host, &ids); 1564 if (IS_ERR(rport)) { 1565 scsi_remove_host(target->scsi_host); 1566 return PTR_ERR(rport); 1567 } 1568 1569 spin_lock(&host->target_lock); 1570 list_add_tail(&target->list, &host->target_list); 1571 spin_unlock(&host->target_lock); 1572 1573 target->state = SRP_TARGET_LIVE; 1574 1575 scsi_scan_target(&target->scsi_host->shost_gendev, 1576 0, target->scsi_id, SCAN_WILD_CARD, 0); 1577 1578 return 0; 1579 } 1580 1581 static void srp_release_class_dev(struct class_device *class_dev) 1582 { 1583 struct srp_host *host = 1584 container_of(class_dev, struct srp_host, class_dev); 1585 1586 complete(&host->released); 1587 } 1588 1589 static struct class srp_class = { 1590 .name = "infiniband_srp", 1591 .release = srp_release_class_dev 1592 }; 1593 1594 /* 1595 * Target ports are added by writing 1596 * 1597 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 1598 * pkey=<P_Key>,service_id=<service ID> 1599 * 1600 * to the add_target sysfs attribute. 1601 */ 1602 enum { 1603 SRP_OPT_ERR = 0, 1604 SRP_OPT_ID_EXT = 1 << 0, 1605 SRP_OPT_IOC_GUID = 1 << 1, 1606 SRP_OPT_DGID = 1 << 2, 1607 SRP_OPT_PKEY = 1 << 3, 1608 SRP_OPT_SERVICE_ID = 1 << 4, 1609 SRP_OPT_MAX_SECT = 1 << 5, 1610 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, 1611 SRP_OPT_IO_CLASS = 1 << 7, 1612 SRP_OPT_INITIATOR_EXT = 1 << 8, 1613 SRP_OPT_ALL = (SRP_OPT_ID_EXT | 1614 SRP_OPT_IOC_GUID | 1615 SRP_OPT_DGID | 1616 SRP_OPT_PKEY | 1617 SRP_OPT_SERVICE_ID), 1618 }; 1619 1620 static match_table_t srp_opt_tokens = { 1621 { SRP_OPT_ID_EXT, "id_ext=%s" }, 1622 { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, 1623 { SRP_OPT_DGID, "dgid=%s" }, 1624 { SRP_OPT_PKEY, "pkey=%x" }, 1625 { SRP_OPT_SERVICE_ID, "service_id=%s" }, 1626 { SRP_OPT_MAX_SECT, "max_sect=%d" }, 1627 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, 1628 { SRP_OPT_IO_CLASS, "io_class=%x" }, 1629 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, 1630 { SRP_OPT_ERR, NULL } 1631 }; 1632 1633 static int srp_parse_options(const char *buf, struct srp_target_port *target) 1634 { 1635 char *options, *sep_opt; 1636 char *p; 1637 char dgid[3]; 1638 substring_t args[MAX_OPT_ARGS]; 1639 int opt_mask = 0; 1640 int token; 1641 int ret = -EINVAL; 1642 int i; 1643 1644 options = kstrdup(buf, GFP_KERNEL); 1645 if (!options) 1646 return -ENOMEM; 1647 1648 sep_opt = options; 1649 while ((p = strsep(&sep_opt, ",")) != NULL) { 1650 if (!*p) 1651 continue; 1652 1653 token = match_token(p, srp_opt_tokens, args); 1654 opt_mask |= token; 1655 1656 switch (token) { 1657 case SRP_OPT_ID_EXT: 1658 p = match_strdup(args); 1659 if (!p) { 1660 ret = -ENOMEM; 1661 goto out; 1662 } 1663 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1664 kfree(p); 1665 break; 1666 1667 case SRP_OPT_IOC_GUID: 1668 p = match_strdup(args); 1669 if (!p) { 1670 ret = -ENOMEM; 1671 goto out; 1672 } 1673 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1674 kfree(p); 1675 break; 1676 1677 case SRP_OPT_DGID: 1678 p = match_strdup(args); 1679 if (!p) { 1680 ret = -ENOMEM; 1681 goto out; 1682 } 1683 if (strlen(p) != 32) { 1684 printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p); 1685 kfree(p); 1686 goto out; 1687 } 1688 1689 for (i = 0; i < 16; ++i) { 1690 strlcpy(dgid, p + i * 2, 3); 1691 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); 1692 } 1693 kfree(p); 1694 memcpy(target->orig_dgid, target->path.dgid.raw, 16); 1695 break; 1696 1697 case 
		     SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class  = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host  = host;

	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned
		long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dev->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->dev->name, port);

	if (class_device_register(&host->class_dev))
		goto free_host;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof
			   *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
};

static int __init srp_init_module(void)
{
	int ret;

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	srp_template.sg_tablesize = srp_sg_tablesize;
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);
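/*
 * Example of adding a target port by hand, using the parameter string
 * documented above srp_parse_options().  The identifiers and HCA name
 * below are hypothetical; the sysfs directory name follows the
 * "srp-<device>-<port>" class_id set up in srp_add_port(), and the echo
 * is written as a single line:
 *
 *   echo "id_ext=200400a0b81146a1,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */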