1 /* 2 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/module.h> 34 #include <linux/init.h> 35 #include <linux/slab.h> 36 #include <linux/err.h> 37 #include <linux/string.h> 38 #include <linux/parser.h> 39 #include <linux/random.h> 40 #include <linux/jiffies.h> 41 42 #include <asm/atomic.h> 43 44 #include <scsi/scsi.h> 45 #include <scsi/scsi_device.h> 46 #include <scsi/scsi_dbg.h> 47 #include <scsi/srp.h> 48 #include <scsi/scsi_transport_srp.h> 49 50 #include "ib_srp.h" 51 52 #define DRV_NAME "ib_srp" 53 #define PFX DRV_NAME ": " 54 #define DRV_VERSION "0.2" 55 #define DRV_RELDATE "November 1, 2005" 56 57 MODULE_AUTHOR("Roland Dreier"); 58 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " 59 "v" DRV_VERSION " (" DRV_RELDATE ")"); 60 MODULE_LICENSE("Dual BSD/GPL"); 61 62 static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE; 63 static int srp_max_iu_len; 64 65 module_param(srp_sg_tablesize, int, 0444); 66 MODULE_PARM_DESC(srp_sg_tablesize, 67 "Max number of gather/scatter entries per I/O (default is 12, max 255)"); 68 69 static int topspin_workarounds = 1; 70 71 module_param(topspin_workarounds, int, 0444); 72 MODULE_PARM_DESC(topspin_workarounds, 73 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); 74 75 static int mellanox_workarounds = 1; 76 77 module_param(mellanox_workarounds, int, 0444); 78 MODULE_PARM_DESC(mellanox_workarounds, 79 "Enable workarounds for Mellanox SRP target bugs if != 0"); 80 81 static void srp_add_one(struct ib_device *device); 82 static void srp_remove_one(struct ib_device *device); 83 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); 84 static void srp_send_completion(struct ib_cq *cq, void *target_ptr); 85 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); 86 87 static struct scsi_transport_template *ib_srp_transport_template; 88 89 static struct ib_client srp_client = { 90 .name = "srp", 91 .add = srp_add_one, 92 .remove = srp_remove_one 93 }; 94 95 static struct ib_sa_client srp_sa_client; 96 97 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) 98 { 99 return (struct srp_target_port *) host->hostdata; 100 } 101 102 static const char *srp_target_info(struct Scsi_Host *host) 103 { 104 return host_to_target(host)->target_name; 105 } 106 107 static int srp_target_is_topspin(struct srp_target_port *target) 108 { 109 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; 110 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d }; 111 112 return topspin_workarounds && 113 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || 114 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); 115 } 116 117 static int srp_target_is_mellanox(struct srp_target_port *target) 118 { 119 static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 }; 120 121 return mellanox_workarounds && 122 !memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui); 123 } 124 125 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, 126 gfp_t gfp_mask, 127 enum dma_data_direction direction) 128 { 129 struct srp_iu *iu; 130 131 iu = kmalloc(sizeof *iu, gfp_mask); 132 if (!iu) 133 goto out; 134 135 iu->buf = kzalloc(size, gfp_mask); 136 if (!iu->buf) 137 goto out_free_iu; 138 139 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, 140 direction); 141 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) 142 goto out_free_buf; 143 144 iu->size = size; 145 iu->direction = direction; 146 147 return iu; 148 149 out_free_buf: 150 kfree(iu->buf); 151 out_free_iu: 152 kfree(iu); 153 out: 154 return NULL; 155 } 156 157 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) 158 { 159 if (!iu) 160 return; 161 162 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, 163 iu->direction); 164 kfree(iu->buf); 165 kfree(iu); 166 } 167 168 static void srp_qp_event(struct ib_event *event, void *context) 169 { 170 printk(KERN_ERR PFX "QP event %d\n", event->event); 171 } 172 173 static int srp_init_qp(struct srp_target_port *target, 174 struct ib_qp *qp) 175 { 176 struct ib_qp_attr *attr; 177 int ret; 178 179 attr = kmalloc(sizeof *attr, GFP_KERNEL); 180 if (!attr) 181 return -ENOMEM; 182 183 ret = ib_find_pkey(target->srp_host->srp_dev->dev, 184 target->srp_host->port, 185 be16_to_cpu(target->path.pkey), 186 &attr->pkey_index); 187 if (ret) 188 goto out; 189 190 attr->qp_state = IB_QPS_INIT; 191 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | 192 IB_ACCESS_REMOTE_WRITE); 193 attr->port_num = target->srp_host->port; 194 195 ret = ib_modify_qp(qp, attr, 196 IB_QP_STATE | 197 IB_QP_PKEY_INDEX | 198 IB_QP_ACCESS_FLAGS | 199 IB_QP_PORT); 200 201 out: 202 kfree(attr); 203 return ret; 204 } 205 206 static int srp_new_cm_id(struct srp_target_port *target) 207 { 208 struct ib_cm_id *new_cm_id; 209 210 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, 211 srp_cm_handler, target); 212 if (IS_ERR(new_cm_id)) 213 return PTR_ERR(new_cm_id); 214 215 if (target->cm_id) 216 ib_destroy_cm_id(target->cm_id); 217 target->cm_id = new_cm_id; 218 219 return 0; 220 } 221 222 static int srp_create_target_ib(struct srp_target_port *target) 223 { 224 struct ib_qp_init_attr *init_attr; 225 int ret; 226 227 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); 228 if (!init_attr) 229 return -ENOMEM; 230 231 target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev, 232 srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0); 233 if (IS_ERR(target->recv_cq)) { 234 ret = PTR_ERR(target->recv_cq); 235 goto err; 236 } 237 238 target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev, 239 srp_send_completion, NULL, target, SRP_SQ_SIZE, 0); 240 if (IS_ERR(target->send_cq)) { 241 ret = PTR_ERR(target->send_cq); 242 goto err_recv_cq; 243 } 244 245 ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP); 246 247 init_attr->event_handler = srp_qp_event; 248 init_attr->cap.max_send_wr = SRP_SQ_SIZE; 249 init_attr->cap.max_recv_wr = SRP_RQ_SIZE; 250 init_attr->cap.max_recv_sge = 1; 251 init_attr->cap.max_send_sge = 1; 252 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 253 init_attr->qp_type = IB_QPT_RC; 254 init_attr->send_cq = target->send_cq; 255 init_attr->recv_cq = target->recv_cq; 256 257 target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr); 258 if (IS_ERR(target->qp)) { 259 ret = PTR_ERR(target->qp); 260 goto err_send_cq; 261 } 262 263 ret = srp_init_qp(target, target->qp); 264 if (ret) 265 goto err_qp; 266 267 kfree(init_attr); 268 return 0; 269 270 err_qp: 271 ib_destroy_qp(target->qp); 272 273 err_send_cq: 274 ib_destroy_cq(target->send_cq); 275 276 err_recv_cq: 277 ib_destroy_cq(target->recv_cq); 278 279 err: 280 kfree(init_attr); 281 return ret; 282 } 283 284 static void srp_free_target_ib(struct srp_target_port *target) 285 { 286 int i; 287 288 ib_destroy_qp(target->qp); 289 ib_destroy_cq(target->send_cq); 290 ib_destroy_cq(target->recv_cq); 291 292 for (i = 0; i < SRP_RQ_SIZE; ++i) 293 srp_free_iu(target->srp_host, target->rx_ring[i]); 294 for (i = 0; i < SRP_SQ_SIZE; ++i) 295 srp_free_iu(target->srp_host, target->tx_ring[i]); 296 } 297 298 static void srp_path_rec_completion(int status, 299 struct ib_sa_path_rec *pathrec, 300 void *target_ptr) 301 { 302 struct srp_target_port *target = target_ptr; 303 304 target->status = status; 305 if (status) 306 shost_printk(KERN_ERR, target->scsi_host, 307 PFX "Got failed path rec status %d\n", status); 308 else 309 target->path = *pathrec; 310 complete(&target->done); 311 } 312 313 static int srp_lookup_path(struct srp_target_port *target) 314 { 315 target->path.numb_path = 1; 316 317 init_completion(&target->done); 318 319 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client, 320 target->srp_host->srp_dev->dev, 321 target->srp_host->port, 322 &target->path, 323 IB_SA_PATH_REC_SERVICE_ID | 324 IB_SA_PATH_REC_DGID | 325 IB_SA_PATH_REC_SGID | 326 IB_SA_PATH_REC_NUMB_PATH | 327 IB_SA_PATH_REC_PKEY, 328 SRP_PATH_REC_TIMEOUT_MS, 329 GFP_KERNEL, 330 srp_path_rec_completion, 331 target, &target->path_query); 332 if (target->path_query_id < 0) 333 return target->path_query_id; 334 335 wait_for_completion(&target->done); 336 337 if (target->status < 0) 338 shost_printk(KERN_WARNING, target->scsi_host, 339 PFX "Path record query failed\n"); 340 341 return target->status; 342 } 343 344 static int srp_send_req(struct srp_target_port *target) 345 { 346 struct { 347 struct ib_cm_req_param param; 348 struct srp_login_req priv; 349 } *req = NULL; 350 int status; 351 352 req = kzalloc(sizeof *req, GFP_KERNEL); 353 if (!req) 354 return -ENOMEM; 355 356 req->param.primary_path = &target->path; 357 req->param.alternate_path = NULL; 358 req->param.service_id = target->service_id; 359 req->param.qp_num = target->qp->qp_num; 360 req->param.qp_type = target->qp->qp_type; 361 req->param.private_data = &req->priv; 362 req->param.private_data_len = sizeof req->priv; 363 req->param.flow_control = 1; 364 365 get_random_bytes(&req->param.starting_psn, 4); 366 req->param.starting_psn &= 0xffffff; 367 368 /* 369 * Pick some arbitrary defaults here; we could make these 370 * module parameters if anyone cared about setting them. 371 */ 372 req->param.responder_resources = 4; 373 req->param.remote_cm_response_timeout = 20; 374 req->param.local_cm_response_timeout = 20; 375 req->param.retry_count = 7; 376 req->param.rnr_retry_count = 7; 377 req->param.max_cm_retries = 15; 378 379 req->priv.opcode = SRP_LOGIN_REQ; 380 req->priv.tag = 0; 381 req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len); 382 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | 383 SRP_BUF_FORMAT_INDIRECT); 384 /* 385 * In the published SRP specification (draft rev. 16a), the 386 * port identifier format is 8 bytes of ID extension followed 387 * by 8 bytes of GUID. Older drafts put the two halves in the 388 * opposite order, so that the GUID comes first. 389 * 390 * Targets conforming to these obsolete drafts can be 391 * recognized by the I/O Class they report. 392 */ 393 if (target->io_class == SRP_REV10_IB_IO_CLASS) { 394 memcpy(req->priv.initiator_port_id, 395 &target->path.sgid.global.interface_id, 8); 396 memcpy(req->priv.initiator_port_id + 8, 397 &target->initiator_ext, 8); 398 memcpy(req->priv.target_port_id, &target->ioc_guid, 8); 399 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8); 400 } else { 401 memcpy(req->priv.initiator_port_id, 402 &target->initiator_ext, 8); 403 memcpy(req->priv.initiator_port_id + 8, 404 &target->path.sgid.global.interface_id, 8); 405 memcpy(req->priv.target_port_id, &target->id_ext, 8); 406 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); 407 } 408 409 /* 410 * Topspin/Cisco SRP targets will reject our login unless we 411 * zero out the first 8 bytes of our initiator port ID and set 412 * the second 8 bytes to the local node GUID. 413 */ 414 if (srp_target_is_topspin(target)) { 415 shost_printk(KERN_DEBUG, target->scsi_host, 416 PFX "Topspin/Cisco initiator port ID workaround " 417 "activated for target GUID %016llx\n", 418 (unsigned long long) be64_to_cpu(target->ioc_guid)); 419 memset(req->priv.initiator_port_id, 0, 8); 420 memcpy(req->priv.initiator_port_id + 8, 421 &target->srp_host->srp_dev->dev->node_guid, 8); 422 } 423 424 status = ib_send_cm_req(target->cm_id, &req->param); 425 426 kfree(req); 427 428 return status; 429 } 430 431 static void srp_disconnect_target(struct srp_target_port *target) 432 { 433 /* XXX should send SRP_I_LOGOUT request */ 434 435 init_completion(&target->done); 436 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { 437 shost_printk(KERN_DEBUG, target->scsi_host, 438 PFX "Sending CM DREQ failed\n"); 439 return; 440 } 441 wait_for_completion(&target->done); 442 } 443 444 static bool srp_change_state(struct srp_target_port *target, 445 enum srp_target_state old, 446 enum srp_target_state new) 447 { 448 bool changed = false; 449 450 spin_lock_irq(&target->lock); 451 if (target->state == old) { 452 target->state = new; 453 changed = true; 454 } 455 spin_unlock_irq(&target->lock); 456 return changed; 457 } 458 459 static void srp_remove_work(struct work_struct *work) 460 { 461 struct srp_target_port *target = 462 container_of(work, struct srp_target_port, work); 463 464 if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED)) 465 return; 466 467 spin_lock(&target->srp_host->target_lock); 468 list_del(&target->list); 469 spin_unlock(&target->srp_host->target_lock); 470 471 srp_remove_host(target->scsi_host); 472 scsi_remove_host(target->scsi_host); 473 ib_destroy_cm_id(target->cm_id); 474 srp_free_target_ib(target); 475 scsi_host_put(target->scsi_host); 476 } 477 478 static int srp_connect_target(struct srp_target_port *target) 479 { 480 int retries = 3; 481 int ret; 482 483 ret = srp_lookup_path(target); 484 if (ret) 485 return ret; 486 487 while (1) { 488 init_completion(&target->done); 489 ret = srp_send_req(target); 490 if (ret) 491 return ret; 492 wait_for_completion(&target->done); 493 494 /* 495 * The CM event handling code will set status to 496 * SRP_PORT_REDIRECT if we get a port redirect REJ 497 * back, or SRP_DLID_REDIRECT if we get a lid/qp 498 * redirect REJ back. 499 */ 500 switch (target->status) { 501 case 0: 502 return 0; 503 504 case SRP_PORT_REDIRECT: 505 ret = srp_lookup_path(target); 506 if (ret) 507 return ret; 508 break; 509 510 case SRP_DLID_REDIRECT: 511 break; 512 513 case SRP_STALE_CONN: 514 /* Our current CM id was stale, and is now in timewait. 515 * Try to reconnect with a new one. 516 */ 517 if (!retries-- || srp_new_cm_id(target)) { 518 shost_printk(KERN_ERR, target->scsi_host, PFX 519 "giving up on stale connection\n"); 520 target->status = -ECONNRESET; 521 return target->status; 522 } 523 524 shost_printk(KERN_ERR, target->scsi_host, PFX 525 "retrying stale connection\n"); 526 break; 527 528 default: 529 return target->status; 530 } 531 } 532 } 533 534 static void srp_unmap_data(struct scsi_cmnd *scmnd, 535 struct srp_target_port *target, 536 struct srp_request *req) 537 { 538 if (!scsi_sglist(scmnd) || 539 (scmnd->sc_data_direction != DMA_TO_DEVICE && 540 scmnd->sc_data_direction != DMA_FROM_DEVICE)) 541 return; 542 543 if (req->fmr) { 544 ib_fmr_pool_unmap(req->fmr); 545 req->fmr = NULL; 546 } 547 548 ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd), 549 scsi_sg_count(scmnd), scmnd->sc_data_direction); 550 } 551 552 static void srp_remove_req(struct srp_target_port *target, 553 struct srp_request *req, s32 req_lim_delta) 554 { 555 unsigned long flags; 556 557 srp_unmap_data(req->scmnd, target, req); 558 spin_lock_irqsave(&target->lock, flags); 559 target->req_lim += req_lim_delta; 560 req->scmnd = NULL; 561 list_add_tail(&req->list, &target->free_reqs); 562 spin_unlock_irqrestore(&target->lock, flags); 563 } 564 565 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) 566 { 567 req->scmnd->result = DID_RESET << 16; 568 req->scmnd->scsi_done(req->scmnd); 569 srp_remove_req(target, req, 0); 570 } 571 572 static int srp_reconnect_target(struct srp_target_port *target) 573 { 574 struct ib_qp_attr qp_attr; 575 struct ib_wc wc; 576 int i, ret; 577 578 if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING)) 579 return -EAGAIN; 580 581 srp_disconnect_target(target); 582 /* 583 * Now get a new local CM ID so that we avoid confusing the 584 * target in case things are really fouled up. 585 */ 586 ret = srp_new_cm_id(target); 587 if (ret) 588 goto err; 589 590 qp_attr.qp_state = IB_QPS_RESET; 591 ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE); 592 if (ret) 593 goto err; 594 595 ret = srp_init_qp(target, target->qp); 596 if (ret) 597 goto err; 598 599 while (ib_poll_cq(target->recv_cq, 1, &wc) > 0) 600 ; /* nothing */ 601 while (ib_poll_cq(target->send_cq, 1, &wc) > 0) 602 ; /* nothing */ 603 604 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { 605 struct srp_request *req = &target->req_ring[i]; 606 if (req->scmnd) 607 srp_reset_req(target, req); 608 } 609 610 INIT_LIST_HEAD(&target->free_tx); 611 for (i = 0; i < SRP_SQ_SIZE; ++i) 612 list_add(&target->tx_ring[i]->list, &target->free_tx); 613 614 target->qp_in_error = 0; 615 ret = srp_connect_target(target); 616 if (ret) 617 goto err; 618 619 if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE)) 620 ret = -EAGAIN; 621 622 return ret; 623 624 err: 625 shost_printk(KERN_ERR, target->scsi_host, 626 PFX "reconnect failed (%d), removing target port.\n", ret); 627 628 /* 629 * We couldn't reconnect, so kill our target port off. 630 * However, we have to defer the real removal because we 631 * are in the context of the SCSI error handler now, which 632 * will deadlock if we call scsi_remove_host(). 633 * 634 * Schedule our work inside the lock to avoid a race with 635 * the flush_scheduled_work() in srp_remove_one(). 636 */ 637 spin_lock_irq(&target->lock); 638 if (target->state == SRP_TARGET_CONNECTING) { 639 target->state = SRP_TARGET_DEAD; 640 INIT_WORK(&target->work, srp_remove_work); 641 queue_work(ib_wq, &target->work); 642 } 643 spin_unlock_irq(&target->lock); 644 645 return ret; 646 } 647 648 static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, 649 int sg_cnt, struct srp_request *req, 650 struct srp_direct_buf *buf) 651 { 652 u64 io_addr = 0; 653 u64 *dma_pages; 654 u32 len; 655 int page_cnt; 656 int i, j; 657 int ret; 658 struct srp_device *dev = target->srp_host->srp_dev; 659 struct ib_device *ibdev = dev->dev; 660 struct scatterlist *sg; 661 662 if (!dev->fmr_pool) 663 return -ENODEV; 664 665 if (srp_target_is_mellanox(target) && 666 (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask)) 667 return -EINVAL; 668 669 len = page_cnt = 0; 670 scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) { 671 unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 672 673 if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) { 674 if (i > 0) 675 return -EINVAL; 676 else 677 ++page_cnt; 678 } 679 if ((ib_sg_dma_address(ibdev, sg) + dma_len) & 680 ~dev->fmr_page_mask) { 681 if (i < sg_cnt - 1) 682 return -EINVAL; 683 else 684 ++page_cnt; 685 } 686 687 len += dma_len; 688 } 689 690 page_cnt += len >> dev->fmr_page_shift; 691 if (page_cnt > SRP_FMR_SIZE) 692 return -ENOMEM; 693 694 dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC); 695 if (!dma_pages) 696 return -ENOMEM; 697 698 page_cnt = 0; 699 scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) { 700 unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 701 702 for (j = 0; j < dma_len; j += dev->fmr_page_size) 703 dma_pages[page_cnt++] = 704 (ib_sg_dma_address(ibdev, sg) & 705 dev->fmr_page_mask) + j; 706 } 707 708 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, 709 dma_pages, page_cnt, io_addr); 710 if (IS_ERR(req->fmr)) { 711 ret = PTR_ERR(req->fmr); 712 req->fmr = NULL; 713 goto out; 714 } 715 716 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) & 717 ~dev->fmr_page_mask); 718 buf->key = cpu_to_be32(req->fmr->fmr->rkey); 719 buf->len = cpu_to_be32(len); 720 721 ret = 0; 722 723 out: 724 kfree(dma_pages); 725 726 return ret; 727 } 728 729 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, 730 struct srp_request *req) 731 { 732 struct scatterlist *scat; 733 struct srp_cmd *cmd = req->cmd->buf; 734 int len, nents, count; 735 u8 fmt = SRP_DATA_DESC_DIRECT; 736 struct srp_device *dev; 737 struct ib_device *ibdev; 738 739 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) 740 return sizeof (struct srp_cmd); 741 742 if (scmnd->sc_data_direction != DMA_FROM_DEVICE && 743 scmnd->sc_data_direction != DMA_TO_DEVICE) { 744 shost_printk(KERN_WARNING, target->scsi_host, 745 PFX "Unhandled data direction %d\n", 746 scmnd->sc_data_direction); 747 return -EINVAL; 748 } 749 750 nents = scsi_sg_count(scmnd); 751 scat = scsi_sglist(scmnd); 752 753 dev = target->srp_host->srp_dev; 754 ibdev = dev->dev; 755 756 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 757 758 fmt = SRP_DATA_DESC_DIRECT; 759 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 760 761 if (count == 1) { 762 /* 763 * The midlayer only generated a single gather/scatter 764 * entry, or DMA mapping coalesced everything to a 765 * single entry. So a direct descriptor along with 766 * the DMA MR suffices. 767 */ 768 struct srp_direct_buf *buf = (void *) cmd->add_data; 769 770 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 771 buf->key = cpu_to_be32(target->rkey); 772 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 773 } else if (srp_map_fmr(target, scat, count, req, 774 (void *) cmd->add_data)) { 775 /* 776 * FMR mapping failed, and the scatterlist has more 777 * than one entry. Generate an indirect memory 778 * descriptor. 779 */ 780 struct srp_indirect_buf *buf = (void *) cmd->add_data; 781 struct scatterlist *sg; 782 u32 datalen = 0; 783 int i; 784 785 fmt = SRP_DATA_DESC_INDIRECT; 786 len = sizeof (struct srp_cmd) + 787 sizeof (struct srp_indirect_buf) + 788 count * sizeof (struct srp_direct_buf); 789 790 scsi_for_each_sg(scmnd, sg, count, i) { 791 unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 792 793 buf->desc_list[i].va = 794 cpu_to_be64(ib_sg_dma_address(ibdev, sg)); 795 buf->desc_list[i].key = 796 cpu_to_be32(target->rkey); 797 buf->desc_list[i].len = cpu_to_be32(dma_len); 798 datalen += dma_len; 799 } 800 801 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 802 cmd->data_out_desc_cnt = count; 803 else 804 cmd->data_in_desc_cnt = count; 805 806 buf->table_desc.va = 807 cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf); 808 buf->table_desc.key = 809 cpu_to_be32(target->rkey); 810 buf->table_desc.len = 811 cpu_to_be32(count * sizeof (struct srp_direct_buf)); 812 813 buf->len = cpu_to_be32(datalen); 814 } 815 816 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 817 cmd->buf_fmt = fmt << 4; 818 else 819 cmd->buf_fmt = fmt; 820 821 return len; 822 } 823 824 /* 825 * Return an IU and possible credit to the free pool 826 */ 827 static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu, 828 enum srp_iu_type iu_type) 829 { 830 unsigned long flags; 831 832 spin_lock_irqsave(&target->lock, flags); 833 list_add(&iu->list, &target->free_tx); 834 if (iu_type != SRP_IU_RSP) 835 ++target->req_lim; 836 spin_unlock_irqrestore(&target->lock, flags); 837 } 838 839 /* 840 * Must be called with target->lock held to protect req_lim and free_tx. 841 * If IU is not sent, it must be returned using srp_put_tx_iu(). 842 * 843 * Note: 844 * An upper limit for the number of allocated information units for each 845 * request type is: 846 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues 847 * more than Scsi_Host.can_queue requests. 848 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. 849 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 850 * one unanswered SRP request to an initiator. 851 */ 852 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, 853 enum srp_iu_type iu_type) 854 { 855 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 856 struct srp_iu *iu; 857 858 srp_send_completion(target->send_cq, target); 859 860 if (list_empty(&target->free_tx)) 861 return NULL; 862 863 /* Initiator responses to target requests do not consume credits */ 864 if (iu_type != SRP_IU_RSP) { 865 if (target->req_lim <= rsv) { 866 ++target->zero_req_lim; 867 return NULL; 868 } 869 870 --target->req_lim; 871 } 872 873 iu = list_first_entry(&target->free_tx, struct srp_iu, list); 874 list_del(&iu->list); 875 return iu; 876 } 877 878 static int srp_post_send(struct srp_target_port *target, 879 struct srp_iu *iu, int len) 880 { 881 struct ib_sge list; 882 struct ib_send_wr wr, *bad_wr; 883 884 list.addr = iu->dma; 885 list.length = len; 886 list.lkey = target->lkey; 887 888 wr.next = NULL; 889 wr.wr_id = (uintptr_t) iu; 890 wr.sg_list = &list; 891 wr.num_sge = 1; 892 wr.opcode = IB_WR_SEND; 893 wr.send_flags = IB_SEND_SIGNALED; 894 895 return ib_post_send(target->qp, &wr, &bad_wr); 896 } 897 898 static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu) 899 { 900 struct ib_recv_wr wr, *bad_wr; 901 struct ib_sge list; 902 903 list.addr = iu->dma; 904 list.length = iu->size; 905 list.lkey = target->lkey; 906 907 wr.next = NULL; 908 wr.wr_id = (uintptr_t) iu; 909 wr.sg_list = &list; 910 wr.num_sge = 1; 911 912 return ib_post_recv(target->qp, &wr, &bad_wr); 913 } 914 915 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) 916 { 917 struct srp_request *req; 918 struct scsi_cmnd *scmnd; 919 unsigned long flags; 920 921 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 922 spin_lock_irqsave(&target->lock, flags); 923 target->req_lim += be32_to_cpu(rsp->req_lim_delta); 924 spin_unlock_irqrestore(&target->lock, flags); 925 926 target->tsk_mgmt_status = -1; 927 if (be32_to_cpu(rsp->resp_data_len) >= 4) 928 target->tsk_mgmt_status = rsp->data[3]; 929 complete(&target->tsk_mgmt_done); 930 } else { 931 req = &target->req_ring[rsp->tag]; 932 scmnd = req->scmnd; 933 if (!scmnd) 934 shost_printk(KERN_ERR, target->scsi_host, 935 "Null scmnd for RSP w/tag %016llx\n", 936 (unsigned long long) rsp->tag); 937 scmnd->result = rsp->status; 938 939 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { 940 memcpy(scmnd->sense_buffer, rsp->data + 941 be32_to_cpu(rsp->resp_data_len), 942 min_t(int, be32_to_cpu(rsp->sense_data_len), 943 SCSI_SENSE_BUFFERSIZE)); 944 } 945 946 if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER)) 947 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); 948 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) 949 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 950 951 srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta)); 952 scmnd->host_scribble = NULL; 953 scmnd->scsi_done(scmnd); 954 } 955 } 956 957 static int srp_response_common(struct srp_target_port *target, s32 req_delta, 958 void *rsp, int len) 959 { 960 struct ib_device *dev = target->srp_host->srp_dev->dev; 961 unsigned long flags; 962 struct srp_iu *iu; 963 int err; 964 965 spin_lock_irqsave(&target->lock, flags); 966 target->req_lim += req_delta; 967 iu = __srp_get_tx_iu(target, SRP_IU_RSP); 968 spin_unlock_irqrestore(&target->lock, flags); 969 970 if (!iu) { 971 shost_printk(KERN_ERR, target->scsi_host, PFX 972 "no IU available to send response\n"); 973 return 1; 974 } 975 976 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 977 memcpy(iu->buf, rsp, len); 978 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 979 980 err = srp_post_send(target, iu, len); 981 if (err) { 982 shost_printk(KERN_ERR, target->scsi_host, PFX 983 "unable to post response: %d\n", err); 984 srp_put_tx_iu(target, iu, SRP_IU_RSP); 985 } 986 987 return err; 988 } 989 990 static void srp_process_cred_req(struct srp_target_port *target, 991 struct srp_cred_req *req) 992 { 993 struct srp_cred_rsp rsp = { 994 .opcode = SRP_CRED_RSP, 995 .tag = req->tag, 996 }; 997 s32 delta = be32_to_cpu(req->req_lim_delta); 998 999 if (srp_response_common(target, delta, &rsp, sizeof rsp)) 1000 shost_printk(KERN_ERR, target->scsi_host, PFX 1001 "problems processing SRP_CRED_REQ\n"); 1002 } 1003 1004 static void srp_process_aer_req(struct srp_target_port *target, 1005 struct srp_aer_req *req) 1006 { 1007 struct srp_aer_rsp rsp = { 1008 .opcode = SRP_AER_RSP, 1009 .tag = req->tag, 1010 }; 1011 s32 delta = be32_to_cpu(req->req_lim_delta); 1012 1013 shost_printk(KERN_ERR, target->scsi_host, PFX 1014 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); 1015 1016 if (srp_response_common(target, delta, &rsp, sizeof rsp)) 1017 shost_printk(KERN_ERR, target->scsi_host, PFX 1018 "problems processing SRP_AER_REQ\n"); 1019 } 1020 1021 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 1022 { 1023 struct ib_device *dev = target->srp_host->srp_dev->dev; 1024 struct srp_iu *iu = (struct srp_iu *) wc->wr_id; 1025 int res; 1026 u8 opcode; 1027 1028 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 1029 DMA_FROM_DEVICE); 1030 1031 opcode = *(u8 *) iu->buf; 1032 1033 if (0) { 1034 shost_printk(KERN_ERR, target->scsi_host, 1035 PFX "recv completion, opcode 0x%02x\n", opcode); 1036 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, 1037 iu->buf, wc->byte_len, true); 1038 } 1039 1040 switch (opcode) { 1041 case SRP_RSP: 1042 srp_process_rsp(target, iu->buf); 1043 break; 1044 1045 case SRP_CRED_REQ: 1046 srp_process_cred_req(target, iu->buf); 1047 break; 1048 1049 case SRP_AER_REQ: 1050 srp_process_aer_req(target, iu->buf); 1051 break; 1052 1053 case SRP_T_LOGOUT: 1054 /* XXX Handle target logout */ 1055 shost_printk(KERN_WARNING, target->scsi_host, 1056 PFX "Got target logout request\n"); 1057 break; 1058 1059 default: 1060 shost_printk(KERN_WARNING, target->scsi_host, 1061 PFX "Unhandled SRP opcode 0x%02x\n", opcode); 1062 break; 1063 } 1064 1065 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, 1066 DMA_FROM_DEVICE); 1067 1068 res = srp_post_recv(target, iu); 1069 if (res != 0) 1070 shost_printk(KERN_ERR, target->scsi_host, 1071 PFX "Recv failed with error code %d\n", res); 1072 } 1073 1074 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) 1075 { 1076 struct srp_target_port *target = target_ptr; 1077 struct ib_wc wc; 1078 1079 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 1080 while (ib_poll_cq(cq, 1, &wc) > 0) { 1081 if (wc.status) { 1082 shost_printk(KERN_ERR, target->scsi_host, 1083 PFX "failed receive status %d\n", 1084 wc.status); 1085 target->qp_in_error = 1; 1086 break; 1087 } 1088 1089 srp_handle_recv(target, &wc); 1090 } 1091 } 1092 1093 static void srp_send_completion(struct ib_cq *cq, void *target_ptr) 1094 { 1095 struct srp_target_port *target = target_ptr; 1096 struct ib_wc wc; 1097 struct srp_iu *iu; 1098 1099 while (ib_poll_cq(cq, 1, &wc) > 0) { 1100 if (wc.status) { 1101 shost_printk(KERN_ERR, target->scsi_host, 1102 PFX "failed send status %d\n", 1103 wc.status); 1104 target->qp_in_error = 1; 1105 break; 1106 } 1107 1108 iu = (struct srp_iu *) wc.wr_id; 1109 list_add(&iu->list, &target->free_tx); 1110 } 1111 } 1112 1113 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) 1114 { 1115 struct srp_target_port *target = host_to_target(shost); 1116 struct srp_request *req; 1117 struct srp_iu *iu; 1118 struct srp_cmd *cmd; 1119 struct ib_device *dev; 1120 unsigned long flags; 1121 int len; 1122 1123 if (target->state == SRP_TARGET_CONNECTING) 1124 goto err; 1125 1126 if (target->state == SRP_TARGET_DEAD || 1127 target->state == SRP_TARGET_REMOVED) { 1128 scmnd->result = DID_BAD_TARGET << 16; 1129 scmnd->scsi_done(scmnd); 1130 return 0; 1131 } 1132 1133 spin_lock_irqsave(&target->lock, flags); 1134 iu = __srp_get_tx_iu(target, SRP_IU_CMD); 1135 if (!iu) 1136 goto err_unlock; 1137 1138 req = list_first_entry(&target->free_reqs, struct srp_request, list); 1139 list_del(&req->list); 1140 spin_unlock_irqrestore(&target->lock, flags); 1141 1142 dev = target->srp_host->srp_dev->dev; 1143 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, 1144 DMA_TO_DEVICE); 1145 1146 scmnd->result = 0; 1147 scmnd->host_scribble = (void *) req; 1148 1149 cmd = iu->buf; 1150 memset(cmd, 0, sizeof *cmd); 1151 1152 cmd->opcode = SRP_CMD; 1153 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); 1154 cmd->tag = req->index; 1155 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 1156 1157 req->scmnd = scmnd; 1158 req->cmd = iu; 1159 1160 len = srp_map_data(scmnd, target, req); 1161 if (len < 0) { 1162 shost_printk(KERN_ERR, target->scsi_host, 1163 PFX "Failed to map data\n"); 1164 goto err_iu; 1165 } 1166 1167 ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, 1168 DMA_TO_DEVICE); 1169 1170 if (srp_post_send(target, iu, len)) { 1171 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 1172 goto err_unmap; 1173 } 1174 1175 return 0; 1176 1177 err_unmap: 1178 srp_unmap_data(scmnd, target, req); 1179 1180 err_iu: 1181 srp_put_tx_iu(target, iu, SRP_IU_CMD); 1182 1183 spin_lock_irqsave(&target->lock, flags); 1184 list_add(&req->list, &target->free_reqs); 1185 1186 err_unlock: 1187 spin_unlock_irqrestore(&target->lock, flags); 1188 1189 err: 1190 return SCSI_MLQUEUE_HOST_BUSY; 1191 } 1192 1193 static int srp_alloc_iu_bufs(struct srp_target_port *target) 1194 { 1195 int i; 1196 1197 for (i = 0; i < SRP_RQ_SIZE; ++i) { 1198 target->rx_ring[i] = srp_alloc_iu(target->srp_host, 1199 target->max_ti_iu_len, 1200 GFP_KERNEL, DMA_FROM_DEVICE); 1201 if (!target->rx_ring[i]) 1202 goto err; 1203 } 1204 1205 for (i = 0; i < SRP_SQ_SIZE; ++i) { 1206 target->tx_ring[i] = srp_alloc_iu(target->srp_host, 1207 srp_max_iu_len, 1208 GFP_KERNEL, DMA_TO_DEVICE); 1209 if (!target->tx_ring[i]) 1210 goto err; 1211 1212 list_add(&target->tx_ring[i]->list, &target->free_tx); 1213 } 1214 1215 return 0; 1216 1217 err: 1218 for (i = 0; i < SRP_RQ_SIZE; ++i) { 1219 srp_free_iu(target->srp_host, target->rx_ring[i]); 1220 target->rx_ring[i] = NULL; 1221 } 1222 1223 for (i = 0; i < SRP_SQ_SIZE; ++i) { 1224 srp_free_iu(target->srp_host, target->tx_ring[i]); 1225 target->tx_ring[i] = NULL; 1226 } 1227 1228 return -ENOMEM; 1229 } 1230 1231 static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 1232 struct ib_cm_event *event, 1233 struct srp_target_port *target) 1234 { 1235 struct Scsi_Host *shost = target->scsi_host; 1236 struct ib_class_port_info *cpi; 1237 int opcode; 1238 1239 switch (event->param.rej_rcvd.reason) { 1240 case IB_CM_REJ_PORT_CM_REDIRECT: 1241 cpi = event->param.rej_rcvd.ari; 1242 target->path.dlid = cpi->redirect_lid; 1243 target->path.pkey = cpi->redirect_pkey; 1244 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 1245 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16); 1246 1247 target->status = target->path.dlid ? 1248 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 1249 break; 1250 1251 case IB_CM_REJ_PORT_REDIRECT: 1252 if (srp_target_is_topspin(target)) { 1253 /* 1254 * Topspin/Cisco SRP gateways incorrectly send 1255 * reject reason code 25 when they mean 24 1256 * (port redirect). 1257 */ 1258 memcpy(target->path.dgid.raw, 1259 event->param.rej_rcvd.ari, 16); 1260 1261 shost_printk(KERN_DEBUG, shost, 1262 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 1263 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix), 1264 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id)); 1265 1266 target->status = SRP_PORT_REDIRECT; 1267 } else { 1268 shost_printk(KERN_WARNING, shost, 1269 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 1270 target->status = -ECONNRESET; 1271 } 1272 break; 1273 1274 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 1275 shost_printk(KERN_WARNING, shost, 1276 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 1277 target->status = -ECONNRESET; 1278 break; 1279 1280 case IB_CM_REJ_CONSUMER_DEFINED: 1281 opcode = *(u8 *) event->private_data; 1282 if (opcode == SRP_LOGIN_REJ) { 1283 struct srp_login_rej *rej = event->private_data; 1284 u32 reason = be32_to_cpu(rej->reason); 1285 1286 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 1287 shost_printk(KERN_WARNING, shost, 1288 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 1289 else 1290 shost_printk(KERN_WARNING, shost, 1291 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason); 1292 } else 1293 shost_printk(KERN_WARNING, shost, 1294 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 1295 " opcode 0x%02x\n", opcode); 1296 target->status = -ECONNRESET; 1297 break; 1298 1299 case IB_CM_REJ_STALE_CONN: 1300 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 1301 target->status = SRP_STALE_CONN; 1302 break; 1303 1304 default: 1305 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 1306 event->param.rej_rcvd.reason); 1307 target->status = -ECONNRESET; 1308 } 1309 } 1310 1311 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 1312 { 1313 struct srp_target_port *target = cm_id->context; 1314 struct ib_qp_attr *qp_attr = NULL; 1315 int attr_mask = 0; 1316 int comp = 0; 1317 int opcode = 0; 1318 int i; 1319 1320 switch (event->event) { 1321 case IB_CM_REQ_ERROR: 1322 shost_printk(KERN_DEBUG, target->scsi_host, 1323 PFX "Sending CM REQ failed\n"); 1324 comp = 1; 1325 target->status = -ECONNRESET; 1326 break; 1327 1328 case IB_CM_REP_RECEIVED: 1329 comp = 1; 1330 opcode = *(u8 *) event->private_data; 1331 1332 if (opcode == SRP_LOGIN_RSP) { 1333 struct srp_login_rsp *rsp = event->private_data; 1334 1335 target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len); 1336 target->req_lim = be32_to_cpu(rsp->req_lim_delta); 1337 1338 /* 1339 * Reserve credits for task management so we don't 1340 * bounce requests back to the SCSI mid-layer. 1341 */ 1342 target->scsi_host->can_queue 1343 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, 1344 target->scsi_host->can_queue); 1345 } else { 1346 shost_printk(KERN_WARNING, target->scsi_host, 1347 PFX "Unhandled RSP opcode %#x\n", opcode); 1348 target->status = -ECONNRESET; 1349 break; 1350 } 1351 1352 if (!target->rx_ring[0]) { 1353 target->status = srp_alloc_iu_bufs(target); 1354 if (target->status) 1355 break; 1356 } 1357 1358 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); 1359 if (!qp_attr) { 1360 target->status = -ENOMEM; 1361 break; 1362 } 1363 1364 qp_attr->qp_state = IB_QPS_RTR; 1365 target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 1366 if (target->status) 1367 break; 1368 1369 target->status = ib_modify_qp(target->qp, qp_attr, attr_mask); 1370 if (target->status) 1371 break; 1372 1373 for (i = 0; i < SRP_RQ_SIZE; i++) { 1374 struct srp_iu *iu = target->rx_ring[i]; 1375 target->status = srp_post_recv(target, iu); 1376 if (target->status) 1377 break; 1378 } 1379 if (target->status) 1380 break; 1381 1382 qp_attr->qp_state = IB_QPS_RTS; 1383 target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 1384 if (target->status) 1385 break; 1386 1387 target->status = ib_modify_qp(target->qp, qp_attr, attr_mask); 1388 if (target->status) 1389 break; 1390 1391 target->status = ib_send_cm_rtu(cm_id, NULL, 0); 1392 if (target->status) 1393 break; 1394 1395 break; 1396 1397 case IB_CM_REJ_RECEIVED: 1398 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 1399 comp = 1; 1400 1401 srp_cm_rej_handler(cm_id, event, target); 1402 break; 1403 1404 case IB_CM_DREQ_RECEIVED: 1405 shost_printk(KERN_WARNING, target->scsi_host, 1406 PFX "DREQ received - connection closed\n"); 1407 if (ib_send_cm_drep(cm_id, NULL, 0)) 1408 shost_printk(KERN_ERR, target->scsi_host, 1409 PFX "Sending CM DREP failed\n"); 1410 break; 1411 1412 case IB_CM_TIMEWAIT_EXIT: 1413 shost_printk(KERN_ERR, target->scsi_host, 1414 PFX "connection closed\n"); 1415 1416 comp = 1; 1417 target->status = 0; 1418 break; 1419 1420 case IB_CM_MRA_RECEIVED: 1421 case IB_CM_DREQ_ERROR: 1422 case IB_CM_DREP_RECEIVED: 1423 break; 1424 1425 default: 1426 shost_printk(KERN_WARNING, target->scsi_host, 1427 PFX "Unhandled CM event %d\n", event->event); 1428 break; 1429 } 1430 1431 if (comp) 1432 complete(&target->done); 1433 1434 kfree(qp_attr); 1435 1436 return 0; 1437 } 1438 1439 static int srp_send_tsk_mgmt(struct srp_target_port *target, 1440 u64 req_tag, unsigned int lun, u8 func) 1441 { 1442 struct ib_device *dev = target->srp_host->srp_dev->dev; 1443 struct srp_iu *iu; 1444 struct srp_tsk_mgmt *tsk_mgmt; 1445 1446 if (target->state == SRP_TARGET_DEAD || 1447 target->state == SRP_TARGET_REMOVED) 1448 return -1; 1449 1450 init_completion(&target->tsk_mgmt_done); 1451 1452 spin_lock_irq(&target->lock); 1453 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); 1454 spin_unlock_irq(&target->lock); 1455 1456 if (!iu) 1457 return -1; 1458 1459 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 1460 DMA_TO_DEVICE); 1461 tsk_mgmt = iu->buf; 1462 memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 1463 1464 tsk_mgmt->opcode = SRP_TSK_MGMT; 1465 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48); 1466 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; 1467 tsk_mgmt->tsk_mgmt_func = func; 1468 tsk_mgmt->task_tag = req_tag; 1469 1470 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 1471 DMA_TO_DEVICE); 1472 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { 1473 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); 1474 return -1; 1475 } 1476 1477 if (!wait_for_completion_timeout(&target->tsk_mgmt_done, 1478 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 1479 return -1; 1480 1481 return 0; 1482 } 1483 1484 static int srp_abort(struct scsi_cmnd *scmnd) 1485 { 1486 struct srp_target_port *target = host_to_target(scmnd->device->host); 1487 struct srp_request *req = (struct srp_request *) scmnd->host_scribble; 1488 int ret = SUCCESS; 1489 1490 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 1491 1492 if (!req || target->qp_in_error) 1493 return FAILED; 1494 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, 1495 SRP_TSK_ABORT_TASK)) 1496 return FAILED; 1497 1498 if (req->scmnd) { 1499 if (!target->tsk_mgmt_status) { 1500 srp_remove_req(target, req, 0); 1501 scmnd->result = DID_ABORT << 16; 1502 } else 1503 ret = FAILED; 1504 } 1505 1506 return ret; 1507 } 1508 1509 static int srp_reset_device(struct scsi_cmnd *scmnd) 1510 { 1511 struct srp_target_port *target = host_to_target(scmnd->device->host); 1512 int i; 1513 1514 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 1515 1516 if (target->qp_in_error) 1517 return FAILED; 1518 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, 1519 SRP_TSK_LUN_RESET)) 1520 return FAILED; 1521 if (target->tsk_mgmt_status) 1522 return FAILED; 1523 1524 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { 1525 struct srp_request *req = &target->req_ring[i]; 1526 if (req->scmnd && req->scmnd->device == scmnd->device) 1527 srp_reset_req(target, req); 1528 } 1529 1530 return SUCCESS; 1531 } 1532 1533 static int srp_reset_host(struct scsi_cmnd *scmnd) 1534 { 1535 struct srp_target_port *target = host_to_target(scmnd->device->host); 1536 int ret = FAILED; 1537 1538 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); 1539 1540 if (!srp_reconnect_target(target)) 1541 ret = SUCCESS; 1542 1543 return ret; 1544 } 1545 1546 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, 1547 char *buf) 1548 { 1549 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1550 1551 if (target->state == SRP_TARGET_DEAD || 1552 target->state == SRP_TARGET_REMOVED) 1553 return -ENODEV; 1554 1555 return sprintf(buf, "0x%016llx\n", 1556 (unsigned long long) be64_to_cpu(target->id_ext)); 1557 } 1558 1559 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, 1560 char *buf) 1561 { 1562 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1563 1564 if (target->state == SRP_TARGET_DEAD || 1565 target->state == SRP_TARGET_REMOVED) 1566 return -ENODEV; 1567 1568 return sprintf(buf, "0x%016llx\n", 1569 (unsigned long long) be64_to_cpu(target->ioc_guid)); 1570 } 1571 1572 static ssize_t show_service_id(struct device *dev, 1573 struct device_attribute *attr, char *buf) 1574 { 1575 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1576 1577 if (target->state == SRP_TARGET_DEAD || 1578 target->state == SRP_TARGET_REMOVED) 1579 return -ENODEV; 1580 1581 return sprintf(buf, "0x%016llx\n", 1582 (unsigned long long) be64_to_cpu(target->service_id)); 1583 } 1584 1585 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, 1586 char *buf) 1587 { 1588 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1589 1590 if (target->state == SRP_TARGET_DEAD || 1591 target->state == SRP_TARGET_REMOVED) 1592 return -ENODEV; 1593 1594 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); 1595 } 1596 1597 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 1598 char *buf) 1599 { 1600 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1601 1602 if (target->state == SRP_TARGET_DEAD || 1603 target->state == SRP_TARGET_REMOVED) 1604 return -ENODEV; 1605 1606 return sprintf(buf, "%pI6\n", target->path.dgid.raw); 1607 } 1608 1609 static ssize_t show_orig_dgid(struct device *dev, 1610 struct device_attribute *attr, char *buf) 1611 { 1612 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1613 1614 if (target->state == SRP_TARGET_DEAD || 1615 target->state == SRP_TARGET_REMOVED) 1616 return -ENODEV; 1617 1618 return sprintf(buf, "%pI6\n", target->orig_dgid); 1619 } 1620 1621 static ssize_t show_req_lim(struct device *dev, 1622 struct device_attribute *attr, char *buf) 1623 { 1624 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1625 1626 if (target->state == SRP_TARGET_DEAD || 1627 target->state == SRP_TARGET_REMOVED) 1628 return -ENODEV; 1629 1630 return sprintf(buf, "%d\n", target->req_lim); 1631 } 1632 1633 static ssize_t show_zero_req_lim(struct device *dev, 1634 struct device_attribute *attr, char *buf) 1635 { 1636 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1637 1638 if (target->state == SRP_TARGET_DEAD || 1639 target->state == SRP_TARGET_REMOVED) 1640 return -ENODEV; 1641 1642 return sprintf(buf, "%d\n", target->zero_req_lim); 1643 } 1644 1645 static ssize_t show_local_ib_port(struct device *dev, 1646 struct device_attribute *attr, char *buf) 1647 { 1648 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1649 1650 return sprintf(buf, "%d\n", target->srp_host->port); 1651 } 1652 1653 static ssize_t show_local_ib_device(struct device *dev, 1654 struct device_attribute *attr, char *buf) 1655 { 1656 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1657 1658 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 1659 } 1660 1661 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); 1662 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); 1663 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); 1664 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 1665 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); 1666 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); 1667 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); 1668 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 1669 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 1670 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 1671 1672 static struct device_attribute *srp_host_attrs[] = { 1673 &dev_attr_id_ext, 1674 &dev_attr_ioc_guid, 1675 &dev_attr_service_id, 1676 &dev_attr_pkey, 1677 &dev_attr_dgid, 1678 &dev_attr_orig_dgid, 1679 &dev_attr_req_lim, 1680 &dev_attr_zero_req_lim, 1681 &dev_attr_local_ib_port, 1682 &dev_attr_local_ib_device, 1683 NULL 1684 }; 1685 1686 static struct scsi_host_template srp_template = { 1687 .module = THIS_MODULE, 1688 .name = "InfiniBand SRP initiator", 1689 .proc_name = DRV_NAME, 1690 .info = srp_target_info, 1691 .queuecommand = srp_queuecommand, 1692 .eh_abort_handler = srp_abort, 1693 .eh_device_reset_handler = srp_reset_device, 1694 .eh_host_reset_handler = srp_reset_host, 1695 .can_queue = SRP_CMD_SQ_SIZE, 1696 .this_id = -1, 1697 .cmd_per_lun = SRP_CMD_SQ_SIZE, 1698 .use_clustering = ENABLE_CLUSTERING, 1699 .shost_attrs = srp_host_attrs 1700 }; 1701 1702 static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 1703 { 1704 struct srp_rport_identifiers ids; 1705 struct srp_rport *rport; 1706 1707 sprintf(target->target_name, "SRP.T10:%016llX", 1708 (unsigned long long) be64_to_cpu(target->id_ext)); 1709 1710 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) 1711 return -ENODEV; 1712 1713 memcpy(ids.port_id, &target->id_ext, 8); 1714 memcpy(ids.port_id + 8, &target->ioc_guid, 8); 1715 ids.roles = SRP_RPORT_ROLE_TARGET; 1716 rport = srp_rport_add(target->scsi_host, &ids); 1717 if (IS_ERR(rport)) { 1718 scsi_remove_host(target->scsi_host); 1719 return PTR_ERR(rport); 1720 } 1721 1722 spin_lock(&host->target_lock); 1723 list_add_tail(&target->list, &host->target_list); 1724 spin_unlock(&host->target_lock); 1725 1726 target->state = SRP_TARGET_LIVE; 1727 1728 scsi_scan_target(&target->scsi_host->shost_gendev, 1729 0, target->scsi_id, SCAN_WILD_CARD, 0); 1730 1731 return 0; 1732 } 1733 1734 static void srp_release_dev(struct device *dev) 1735 { 1736 struct srp_host *host = 1737 container_of(dev, struct srp_host, dev); 1738 1739 complete(&host->released); 1740 } 1741 1742 static struct class srp_class = { 1743 .name = "infiniband_srp", 1744 .dev_release = srp_release_dev 1745 }; 1746 1747 /* 1748 * Target ports are added by writing 1749 * 1750 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 1751 * pkey=<P_Key>,service_id=<service ID> 1752 * 1753 * to the add_target sysfs attribute. 1754 */ 1755 enum { 1756 SRP_OPT_ERR = 0, 1757 SRP_OPT_ID_EXT = 1 << 0, 1758 SRP_OPT_IOC_GUID = 1 << 1, 1759 SRP_OPT_DGID = 1 << 2, 1760 SRP_OPT_PKEY = 1 << 3, 1761 SRP_OPT_SERVICE_ID = 1 << 4, 1762 SRP_OPT_MAX_SECT = 1 << 5, 1763 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, 1764 SRP_OPT_IO_CLASS = 1 << 7, 1765 SRP_OPT_INITIATOR_EXT = 1 << 8, 1766 SRP_OPT_ALL = (SRP_OPT_ID_EXT | 1767 SRP_OPT_IOC_GUID | 1768 SRP_OPT_DGID | 1769 SRP_OPT_PKEY | 1770 SRP_OPT_SERVICE_ID), 1771 }; 1772 1773 static const match_table_t srp_opt_tokens = { 1774 { SRP_OPT_ID_EXT, "id_ext=%s" }, 1775 { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, 1776 { SRP_OPT_DGID, "dgid=%s" }, 1777 { SRP_OPT_PKEY, "pkey=%x" }, 1778 { SRP_OPT_SERVICE_ID, "service_id=%s" }, 1779 { SRP_OPT_MAX_SECT, "max_sect=%d" }, 1780 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, 1781 { SRP_OPT_IO_CLASS, "io_class=%x" }, 1782 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, 1783 { SRP_OPT_ERR, NULL } 1784 }; 1785 1786 static int srp_parse_options(const char *buf, struct srp_target_port *target) 1787 { 1788 char *options, *sep_opt; 1789 char *p; 1790 char dgid[3]; 1791 substring_t args[MAX_OPT_ARGS]; 1792 int opt_mask = 0; 1793 int token; 1794 int ret = -EINVAL; 1795 int i; 1796 1797 options = kstrdup(buf, GFP_KERNEL); 1798 if (!options) 1799 return -ENOMEM; 1800 1801 sep_opt = options; 1802 while ((p = strsep(&sep_opt, ",")) != NULL) { 1803 if (!*p) 1804 continue; 1805 1806 token = match_token(p, srp_opt_tokens, args); 1807 opt_mask |= token; 1808 1809 switch (token) { 1810 case SRP_OPT_ID_EXT: 1811 p = match_strdup(args); 1812 if (!p) { 1813 ret = -ENOMEM; 1814 goto out; 1815 } 1816 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1817 kfree(p); 1818 break; 1819 1820 case SRP_OPT_IOC_GUID: 1821 p = match_strdup(args); 1822 if (!p) { 1823 ret = -ENOMEM; 1824 goto out; 1825 } 1826 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1827 kfree(p); 1828 break; 1829 1830 case SRP_OPT_DGID: 1831 p = match_strdup(args); 1832 if (!p) { 1833 ret = -ENOMEM; 1834 goto out; 1835 } 1836 if (strlen(p) != 32) { 1837 printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p); 1838 kfree(p); 1839 goto out; 1840 } 1841 1842 for (i = 0; i < 16; ++i) { 1843 strlcpy(dgid, p + i * 2, 3); 1844 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); 1845 } 1846 kfree(p); 1847 memcpy(target->orig_dgid, target->path.dgid.raw, 16); 1848 break; 1849 1850 case SRP_OPT_PKEY: 1851 if (match_hex(args, &token)) { 1852 printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p); 1853 goto out; 1854 } 1855 target->path.pkey = cpu_to_be16(token); 1856 break; 1857 1858 case SRP_OPT_SERVICE_ID: 1859 p = match_strdup(args); 1860 if (!p) { 1861 ret = -ENOMEM; 1862 goto out; 1863 } 1864 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1865 target->path.service_id = target->service_id; 1866 kfree(p); 1867 break; 1868 1869 case SRP_OPT_MAX_SECT: 1870 if (match_int(args, &token)) { 1871 printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p); 1872 goto out; 1873 } 1874 target->scsi_host->max_sectors = token; 1875 break; 1876 1877 case SRP_OPT_MAX_CMD_PER_LUN: 1878 if (match_int(args, &token)) { 1879 printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p); 1880 goto out; 1881 } 1882 target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE); 1883 break; 1884 1885 case SRP_OPT_IO_CLASS: 1886 if (match_hex(args, &token)) { 1887 printk(KERN_WARNING PFX "bad IO class parameter '%s' \n", p); 1888 goto out; 1889 } 1890 if (token != SRP_REV10_IB_IO_CLASS && 1891 token != SRP_REV16A_IB_IO_CLASS) { 1892 printk(KERN_WARNING PFX "unknown IO class parameter value" 1893 " %x specified (use %x or %x).\n", 1894 token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS); 1895 goto out; 1896 } 1897 target->io_class = token; 1898 break; 1899 1900 case SRP_OPT_INITIATOR_EXT: 1901 p = match_strdup(args); 1902 if (!p) { 1903 ret = -ENOMEM; 1904 goto out; 1905 } 1906 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1907 kfree(p); 1908 break; 1909 1910 default: 1911 printk(KERN_WARNING PFX "unknown parameter or missing value " 1912 "'%s' in target creation request\n", p); 1913 goto out; 1914 } 1915 } 1916 1917 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL) 1918 ret = 0; 1919 else 1920 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) 1921 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && 1922 !(srp_opt_tokens[i].token & opt_mask)) 1923 printk(KERN_WARNING PFX "target creation request is " 1924 "missing parameter '%s'\n", 1925 srp_opt_tokens[i].pattern); 1926 1927 out: 1928 kfree(options); 1929 return ret; 1930 } 1931 1932 static ssize_t srp_create_target(struct device *dev, 1933 struct device_attribute *attr, 1934 const char *buf, size_t count) 1935 { 1936 struct srp_host *host = 1937 container_of(dev, struct srp_host, dev); 1938 struct Scsi_Host *target_host; 1939 struct srp_target_port *target; 1940 int ret; 1941 int i; 1942 1943 target_host = scsi_host_alloc(&srp_template, 1944 sizeof (struct srp_target_port)); 1945 if (!target_host) 1946 return -ENOMEM; 1947 1948 target_host->transportt = ib_srp_transport_template; 1949 target_host->max_lun = SRP_MAX_LUN; 1950 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; 1951 1952 target = host_to_target(target_host); 1953 1954 target->io_class = SRP_REV16A_IB_IO_CLASS; 1955 target->scsi_host = target_host; 1956 target->srp_host = host; 1957 target->lkey = host->srp_dev->mr->lkey; 1958 target->rkey = host->srp_dev->mr->rkey; 1959 1960 spin_lock_init(&target->lock); 1961 INIT_LIST_HEAD(&target->free_tx); 1962 INIT_LIST_HEAD(&target->free_reqs); 1963 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { 1964 target->req_ring[i].index = i; 1965 list_add_tail(&target->req_ring[i].list, &target->free_reqs); 1966 } 1967 1968 ret = srp_parse_options(buf, target); 1969 if (ret) 1970 goto err; 1971 1972 ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid); 1973 1974 shost_printk(KERN_DEBUG, target->scsi_host, PFX 1975 "new target: id_ext %016llx ioc_guid %016llx pkey %04x " 1976 "service_id %016llx dgid %pI6\n", 1977 (unsigned long long) be64_to_cpu(target->id_ext), 1978 (unsigned long long) be64_to_cpu(target->ioc_guid), 1979 be16_to_cpu(target->path.pkey), 1980 (unsigned long long) be64_to_cpu(target->service_id), 1981 target->path.dgid.raw); 1982 1983 ret = srp_create_target_ib(target); 1984 if (ret) 1985 goto err; 1986 1987 ret = srp_new_cm_id(target); 1988 if (ret) 1989 goto err_free; 1990 1991 target->qp_in_error = 0; 1992 ret = srp_connect_target(target); 1993 if (ret) { 1994 shost_printk(KERN_ERR, target->scsi_host, 1995 PFX "Connection failed\n"); 1996 goto err_cm_id; 1997 } 1998 1999 ret = srp_add_target(host, target); 2000 if (ret) 2001 goto err_disconnect; 2002 2003 return count; 2004 2005 err_disconnect: 2006 srp_disconnect_target(target); 2007 2008 err_cm_id: 2009 ib_destroy_cm_id(target->cm_id); 2010 2011 err_free: 2012 srp_free_target_ib(target); 2013 2014 err: 2015 scsi_host_put(target_host); 2016 2017 return ret; 2018 } 2019 2020 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target); 2021 2022 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, 2023 char *buf) 2024 { 2025 struct srp_host *host = container_of(dev, struct srp_host, dev); 2026 2027 return sprintf(buf, "%s\n", host->srp_dev->dev->name); 2028 } 2029 2030 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); 2031 2032 static ssize_t show_port(struct device *dev, struct device_attribute *attr, 2033 char *buf) 2034 { 2035 struct srp_host *host = container_of(dev, struct srp_host, dev); 2036 2037 return sprintf(buf, "%d\n", host->port); 2038 } 2039 2040 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL); 2041 2042 static struct srp_host *srp_add_port(struct srp_device *device, u8 port) 2043 { 2044 struct srp_host *host; 2045 2046 host = kzalloc(sizeof *host, GFP_KERNEL); 2047 if (!host) 2048 return NULL; 2049 2050 INIT_LIST_HEAD(&host->target_list); 2051 spin_lock_init(&host->target_lock); 2052 init_completion(&host->released); 2053 host->srp_dev = device; 2054 host->port = port; 2055 2056 host->dev.class = &srp_class; 2057 host->dev.parent = device->dev->dma_device; 2058 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port); 2059 2060 if (device_register(&host->dev)) 2061 goto free_host; 2062 if (device_create_file(&host->dev, &dev_attr_add_target)) 2063 goto err_class; 2064 if (device_create_file(&host->dev, &dev_attr_ibdev)) 2065 goto err_class; 2066 if (device_create_file(&host->dev, &dev_attr_port)) 2067 goto err_class; 2068 2069 return host; 2070 2071 err_class: 2072 device_unregister(&host->dev); 2073 2074 free_host: 2075 kfree(host); 2076 2077 return NULL; 2078 } 2079 2080 static void srp_add_one(struct ib_device *device) 2081 { 2082 struct srp_device *srp_dev; 2083 struct ib_device_attr *dev_attr; 2084 struct ib_fmr_pool_param fmr_param; 2085 struct srp_host *host; 2086 int s, e, p; 2087 2088 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); 2089 if (!dev_attr) 2090 return; 2091 2092 if (ib_query_device(device, dev_attr)) { 2093 printk(KERN_WARNING PFX "Query device failed for %s\n", 2094 device->name); 2095 goto free_attr; 2096 } 2097 2098 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); 2099 if (!srp_dev) 2100 goto free_attr; 2101 2102 /* 2103 * Use the smallest page size supported by the HCA, down to a 2104 * minimum of 512 bytes (which is the smallest sector that a 2105 * SCSI command will ever carry). 2106 */ 2107 srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1); 2108 srp_dev->fmr_page_size = 1 << srp_dev->fmr_page_shift; 2109 srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1); 2110 2111 INIT_LIST_HEAD(&srp_dev->dev_list); 2112 2113 srp_dev->dev = device; 2114 srp_dev->pd = ib_alloc_pd(device); 2115 if (IS_ERR(srp_dev->pd)) 2116 goto free_dev; 2117 2118 srp_dev->mr = ib_get_dma_mr(srp_dev->pd, 2119 IB_ACCESS_LOCAL_WRITE | 2120 IB_ACCESS_REMOTE_READ | 2121 IB_ACCESS_REMOTE_WRITE); 2122 if (IS_ERR(srp_dev->mr)) 2123 goto err_pd; 2124 2125 memset(&fmr_param, 0, sizeof fmr_param); 2126 fmr_param.pool_size = SRP_FMR_POOL_SIZE; 2127 fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE; 2128 fmr_param.cache = 1; 2129 fmr_param.max_pages_per_fmr = SRP_FMR_SIZE; 2130 fmr_param.page_shift = srp_dev->fmr_page_shift; 2131 fmr_param.access = (IB_ACCESS_LOCAL_WRITE | 2132 IB_ACCESS_REMOTE_WRITE | 2133 IB_ACCESS_REMOTE_READ); 2134 2135 srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param); 2136 if (IS_ERR(srp_dev->fmr_pool)) 2137 srp_dev->fmr_pool = NULL; 2138 2139 if (device->node_type == RDMA_NODE_IB_SWITCH) { 2140 s = 0; 2141 e = 0; 2142 } else { 2143 s = 1; 2144 e = device->phys_port_cnt; 2145 } 2146 2147 for (p = s; p <= e; ++p) { 2148 host = srp_add_port(srp_dev, p); 2149 if (host) 2150 list_add_tail(&host->list, &srp_dev->dev_list); 2151 } 2152 2153 ib_set_client_data(device, &srp_client, srp_dev); 2154 2155 goto free_attr; 2156 2157 err_pd: 2158 ib_dealloc_pd(srp_dev->pd); 2159 2160 free_dev: 2161 kfree(srp_dev); 2162 2163 free_attr: 2164 kfree(dev_attr); 2165 } 2166 2167 static void srp_remove_one(struct ib_device *device) 2168 { 2169 struct srp_device *srp_dev; 2170 struct srp_host *host, *tmp_host; 2171 LIST_HEAD(target_list); 2172 struct srp_target_port *target, *tmp_target; 2173 2174 srp_dev = ib_get_client_data(device, &srp_client); 2175 2176 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { 2177 device_unregister(&host->dev); 2178 /* 2179 * Wait for the sysfs entry to go away, so that no new 2180 * target ports can be created. 2181 */ 2182 wait_for_completion(&host->released); 2183 2184 /* 2185 * Mark all target ports as removed, so we stop queueing 2186 * commands and don't try to reconnect. 2187 */ 2188 spin_lock(&host->target_lock); 2189 list_for_each_entry(target, &host->target_list, list) { 2190 spin_lock_irq(&target->lock); 2191 target->state = SRP_TARGET_REMOVED; 2192 spin_unlock_irq(&target->lock); 2193 } 2194 spin_unlock(&host->target_lock); 2195 2196 /* 2197 * Wait for any reconnection tasks that may have 2198 * started before we marked our target ports as 2199 * removed, and any target port removal tasks. 2200 */ 2201 flush_workqueue(ib_wq); 2202 2203 list_for_each_entry_safe(target, tmp_target, 2204 &host->target_list, list) { 2205 srp_remove_host(target->scsi_host); 2206 scsi_remove_host(target->scsi_host); 2207 srp_disconnect_target(target); 2208 ib_destroy_cm_id(target->cm_id); 2209 srp_free_target_ib(target); 2210 scsi_host_put(target->scsi_host); 2211 } 2212 2213 kfree(host); 2214 } 2215 2216 if (srp_dev->fmr_pool) 2217 ib_destroy_fmr_pool(srp_dev->fmr_pool); 2218 ib_dereg_mr(srp_dev->mr); 2219 ib_dealloc_pd(srp_dev->pd); 2220 2221 kfree(srp_dev); 2222 } 2223 2224 static struct srp_function_template ib_srp_transport_functions = { 2225 }; 2226 2227 static int __init srp_init_module(void) 2228 { 2229 int ret; 2230 2231 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *)); 2232 2233 if (srp_sg_tablesize > 255) { 2234 printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); 2235 srp_sg_tablesize = 255; 2236 } 2237 2238 ib_srp_transport_template = 2239 srp_attach_transport(&ib_srp_transport_functions); 2240 if (!ib_srp_transport_template) 2241 return -ENOMEM; 2242 2243 srp_template.sg_tablesize = srp_sg_tablesize; 2244 srp_max_iu_len = (sizeof (struct srp_cmd) + 2245 sizeof (struct srp_indirect_buf) + 2246 srp_sg_tablesize * 16); 2247 2248 ret = class_register(&srp_class); 2249 if (ret) { 2250 printk(KERN_ERR PFX "couldn't register class infiniband_srp\n"); 2251 srp_release_transport(ib_srp_transport_template); 2252 return ret; 2253 } 2254 2255 ib_sa_register_client(&srp_sa_client); 2256 2257 ret = ib_register_client(&srp_client); 2258 if (ret) { 2259 printk(KERN_ERR PFX "couldn't register IB client\n"); 2260 srp_release_transport(ib_srp_transport_template); 2261 ib_sa_unregister_client(&srp_sa_client); 2262 class_unregister(&srp_class); 2263 return ret; 2264 } 2265 2266 return 0; 2267 } 2268 2269 static void __exit srp_cleanup_module(void) 2270 { 2271 ib_unregister_client(&srp_client); 2272 ib_sa_unregister_client(&srp_sa_client); 2273 class_unregister(&srp_class); 2274 srp_release_transport(ib_srp_transport_template); 2275 } 2276 2277 module_init(srp_init_module); 2278 module_exit(srp_cleanup_module); 2279