/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID |
						   IB_SA_PATH_REC_DGID |
						   IB_SA_PATH_REC_SGID |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &target->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = target->qp->qp_num;
	req->param.qp_type = target->qp->qp_type;
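	/* The SRP_LOGIN_REQ is carried as the private data of the CM REQ. */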
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = 7;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = DID_RESET << 16;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	int i, ret;

	scsi_target_block(&shost->shost_gendev);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);
	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all completion callback function
	 * invocations have finished before request resetting starts.
	 */
	if (ret == 0)
		ret = srp_create_target_ib(target);
	else
		srp_create_target_ib(target);

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
			    SDEV_TRANSPORT_OFFLINE);
	target->transport_offline = !!ret;

	if (ret)
		goto err;

	shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
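	 * Instead, srp_queue_remove_work() queues the removal on
	 * system_long_wq and srp_remove_work() performs it later.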
	 */
	srp_queue_remove_work(target);

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	state.pages = req->map_page;
	state.next_fmr = req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr = iu->dma;
	list.length = len;
	list.lkey = target->lkey;

	wr.next = NULL;
	wr.wr_id = (uintptr_t) iu;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr = iu->dma;
	list.length = iu->size;
	list.lkey = target->lkey;

	wr.next = NULL;
	wr.wr_id = (uintptr_t) iu;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

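	/* Without a free IU the response cannot be sent; warn and drop it. */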
	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

static void srp_handle_qp_err(enum ib_wc_status wc_status,
			      enum ib_wc_opcode wc_opcode,
			      struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %d\n",
			     wc_opcode & IB_WC_RECV ? "receive" : "send",
			     wc_status);
	}
	target->qp_in_error = true;
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(target, &wc);
		} else {
			srp_handle_qp_err(wc.status, wc.opcode, target);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &target->free_tx);
		} else {
			srp_handle_qp_err(wc.status, wc.opcode, target);
		}
	}
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len;

	if (unlikely(target->transport_offline)) {
		scmnd->result = DID_NO_CONNECT << 16;
		scmnd->scsi_done(scmnd);
		return 0;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->result = 0;
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}

static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!target->rx_ring[0]) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < SRP_RQ_SIZE; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}

static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

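	/* tsk_mgmt_done is completed by srp_process_rsp() when a response
	 * tagged with SRP_TAG_TSK_MGMT arrives.
	 */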
1709 init_completion(&target->tsk_mgmt_done); 1710 1711 spin_lock_irq(&target->lock); 1712 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); 1713 spin_unlock_irq(&target->lock); 1714 1715 if (!iu) 1716 return -1; 1717 1718 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 1719 DMA_TO_DEVICE); 1720 tsk_mgmt = iu->buf; 1721 memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 1722 1723 tsk_mgmt->opcode = SRP_TSK_MGMT; 1724 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48); 1725 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; 1726 tsk_mgmt->tsk_mgmt_func = func; 1727 tsk_mgmt->task_tag = req_tag; 1728 1729 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 1730 DMA_TO_DEVICE); 1731 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { 1732 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); 1733 return -1; 1734 } 1735 1736 if (!wait_for_completion_timeout(&target->tsk_mgmt_done, 1737 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 1738 return -1; 1739 1740 return 0; 1741 } 1742 1743 static int srp_abort(struct scsi_cmnd *scmnd) 1744 { 1745 struct srp_target_port *target = host_to_target(scmnd->device->host); 1746 struct srp_request *req = (struct srp_request *) scmnd->host_scribble; 1747 1748 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 1749 1750 if (!req || !srp_claim_req(target, req, scmnd)) 1751 return FAILED; 1752 srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, 1753 SRP_TSK_ABORT_TASK); 1754 srp_free_req(target, req, scmnd, 0); 1755 scmnd->result = DID_ABORT << 16; 1756 scmnd->scsi_done(scmnd); 1757 1758 return SUCCESS; 1759 } 1760 1761 static int srp_reset_device(struct scsi_cmnd *scmnd) 1762 { 1763 struct srp_target_port *target = host_to_target(scmnd->device->host); 1764 int i; 1765 1766 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 1767 1768 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, 1769 SRP_TSK_LUN_RESET)) 1770 return FAILED; 1771 if (target->tsk_mgmt_status) 1772 return FAILED; 1773 1774 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { 1775 struct srp_request *req = &target->req_ring[i]; 1776 if (req->scmnd && req->scmnd->device == scmnd->device) 1777 srp_reset_req(target, req); 1778 } 1779 1780 return SUCCESS; 1781 } 1782 1783 static int srp_reset_host(struct scsi_cmnd *scmnd) 1784 { 1785 struct srp_target_port *target = host_to_target(scmnd->device->host); 1786 int ret = FAILED; 1787 1788 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); 1789 1790 if (!srp_reconnect_target(target)) 1791 ret = SUCCESS; 1792 1793 return ret; 1794 } 1795 1796 static int srp_slave_configure(struct scsi_device *sdev) 1797 { 1798 struct Scsi_Host *shost = sdev->host; 1799 struct srp_target_port *target = host_to_target(shost); 1800 struct request_queue *q = sdev->request_queue; 1801 unsigned long timeout; 1802 1803 if (sdev->type == TYPE_DISK) { 1804 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); 1805 blk_queue_rq_timeout(q, timeout); 1806 } 1807 1808 return 0; 1809 } 1810 1811 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, 1812 char *buf) 1813 { 1814 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1815 1816 return sprintf(buf, "0x%016llx\n", 1817 (unsigned long long) be64_to_cpu(target->id_ext)); 1818 } 1819 1820 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, 1821 char *buf) 1822 { 1823 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1824 1825 return sprintf(buf, "0x%016llx\n", 1826 (unsigned long long) 
be64_to_cpu(target->ioc_guid)); 1827 } 1828 1829 static ssize_t show_service_id(struct device *dev, 1830 struct device_attribute *attr, char *buf) 1831 { 1832 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1833 1834 return sprintf(buf, "0x%016llx\n", 1835 (unsigned long long) be64_to_cpu(target->service_id)); 1836 } 1837 1838 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, 1839 char *buf) 1840 { 1841 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1842 1843 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); 1844 } 1845 1846 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 1847 char *buf) 1848 { 1849 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1850 1851 return sprintf(buf, "%pI6\n", target->path.dgid.raw); 1852 } 1853 1854 static ssize_t show_orig_dgid(struct device *dev, 1855 struct device_attribute *attr, char *buf) 1856 { 1857 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1858 1859 return sprintf(buf, "%pI6\n", target->orig_dgid); 1860 } 1861 1862 static ssize_t show_req_lim(struct device *dev, 1863 struct device_attribute *attr, char *buf) 1864 { 1865 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1866 1867 return sprintf(buf, "%d\n", target->req_lim); 1868 } 1869 1870 static ssize_t show_zero_req_lim(struct device *dev, 1871 struct device_attribute *attr, char *buf) 1872 { 1873 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1874 1875 return sprintf(buf, "%d\n", target->zero_req_lim); 1876 } 1877 1878 static ssize_t show_local_ib_port(struct device *dev, 1879 struct device_attribute *attr, char *buf) 1880 { 1881 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1882 1883 return sprintf(buf, "%d\n", target->srp_host->port); 1884 } 1885 1886 static ssize_t show_local_ib_device(struct device *dev, 1887 struct device_attribute *attr, char *buf) 1888 { 1889 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1890 1891 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 1892 } 1893 1894 static ssize_t show_cmd_sg_entries(struct device *dev, 1895 struct device_attribute *attr, char *buf) 1896 { 1897 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1898 1899 return sprintf(buf, "%u\n", target->cmd_sg_cnt); 1900 } 1901 1902 static ssize_t show_allow_ext_sg(struct device *dev, 1903 struct device_attribute *attr, char *buf) 1904 { 1905 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1906 1907 return sprintf(buf, "%s\n", target->allow_ext_sg ? 
"true" : "false"); 1908 } 1909 1910 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); 1911 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); 1912 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); 1913 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 1914 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); 1915 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); 1916 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); 1917 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 1918 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 1919 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 1920 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); 1921 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); 1922 1923 static struct device_attribute *srp_host_attrs[] = { 1924 &dev_attr_id_ext, 1925 &dev_attr_ioc_guid, 1926 &dev_attr_service_id, 1927 &dev_attr_pkey, 1928 &dev_attr_dgid, 1929 &dev_attr_orig_dgid, 1930 &dev_attr_req_lim, 1931 &dev_attr_zero_req_lim, 1932 &dev_attr_local_ib_port, 1933 &dev_attr_local_ib_device, 1934 &dev_attr_cmd_sg_entries, 1935 &dev_attr_allow_ext_sg, 1936 NULL 1937 }; 1938 1939 static struct scsi_host_template srp_template = { 1940 .module = THIS_MODULE, 1941 .name = "InfiniBand SRP initiator", 1942 .proc_name = DRV_NAME, 1943 .slave_configure = srp_slave_configure, 1944 .info = srp_target_info, 1945 .queuecommand = srp_queuecommand, 1946 .eh_abort_handler = srp_abort, 1947 .eh_device_reset_handler = srp_reset_device, 1948 .eh_host_reset_handler = srp_reset_host, 1949 .sg_tablesize = SRP_DEF_SG_TABLESIZE, 1950 .can_queue = SRP_CMD_SQ_SIZE, 1951 .this_id = -1, 1952 .cmd_per_lun = SRP_CMD_SQ_SIZE, 1953 .use_clustering = ENABLE_CLUSTERING, 1954 .shost_attrs = srp_host_attrs 1955 }; 1956 1957 static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 1958 { 1959 struct srp_rport_identifiers ids; 1960 struct srp_rport *rport; 1961 1962 sprintf(target->target_name, "SRP.T10:%016llX", 1963 (unsigned long long) be64_to_cpu(target->id_ext)); 1964 1965 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) 1966 return -ENODEV; 1967 1968 memcpy(ids.port_id, &target->id_ext, 8); 1969 memcpy(ids.port_id + 8, &target->ioc_guid, 8); 1970 ids.roles = SRP_RPORT_ROLE_TARGET; 1971 rport = srp_rport_add(target->scsi_host, &ids); 1972 if (IS_ERR(rport)) { 1973 scsi_remove_host(target->scsi_host); 1974 return PTR_ERR(rport); 1975 } 1976 1977 rport->lld_data = target; 1978 1979 spin_lock(&host->target_lock); 1980 list_add_tail(&target->list, &host->target_list); 1981 spin_unlock(&host->target_lock); 1982 1983 target->state = SRP_TARGET_LIVE; 1984 1985 scsi_scan_target(&target->scsi_host->shost_gendev, 1986 0, target->scsi_id, SCAN_WILD_CARD, 0); 1987 1988 return 0; 1989 } 1990 1991 static void srp_release_dev(struct device *dev) 1992 { 1993 struct srp_host *host = 1994 container_of(dev, struct srp_host, dev); 1995 1996 complete(&host->released); 1997 } 1998 1999 static struct class srp_class = { 2000 .name = "infiniband_srp", 2001 .dev_release = srp_release_dev 2002 }; 2003 2004 /* 2005 * Target ports are added by writing 2006 * 2007 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 2008 * pkey=<P_Key>,service_id=<service ID> 2009 * 2010 * to the add_target sysfs attribute. 
enum {
	SRP_OPT_ERR = 0,
	SRP_OPT_ID_EXT = 1 << 0,
	SRP_OPT_IOC_GUID = 1 << 1,
	SRP_OPT_DGID = 1 << 2,
	SRP_OPT_PKEY = 1 << 3,
	SRP_OPT_SERVICE_ID = 1 << 4,
	SRP_OPT_MAX_SECT = 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
	SRP_OPT_IO_CLASS = 1 << 7,
	SRP_OPT_INITIATOR_EXT = 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
	SRP_OPT_ALLOW_EXT_SG = 1 << 10,
	SRP_OPT_SG_TABLESIZE = 1 << 11,
	SRP_OPT_ALL = (SRP_OPT_ID_EXT |
		       SRP_OPT_IOC_GUID |
		       SRP_OPT_DGID |
		       SRP_OPT_PKEY |
		       SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" },
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" },
	{ SRP_OPT_DGID,			"dgid=%s" },
	{ SRP_OPT_PKEY,			"pkey=%x" },
	{ SRP_OPT_SERVICE_ID,		"service_id=%s" },
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" },
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" },
	{ SRP_OPT_IO_CLASS,		"io_class=%x" },
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s" },
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u" },
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u" },
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u" },
	{ SRP_OPT_ERR,			NULL }
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	dma_addr_t dma_addr;
	int i, ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id = 1;
	target_host->max_lun = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host = host;
	target->lkey = host->srp_dev->mr->lkey;
	target->rkey = host->srp_dev->mr->rkey;
	target->cmd_sg_cnt = cmd_sg_entries;
	target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg = allow_ext_sg;

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto err_free_mem;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto err_free_mem;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}

	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size = 1 << fmr_page_shift;
	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (max_pages_per_fmr = SRP_FMR_SIZE;
	     max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
	     max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift = fmr_page_shift;
		fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
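/*
 * Illustrative numbers for the fmr_page_shift calculation in srp_add_one()
 * above: a HCA reporting page_size_cap = 0x001ff000 (4 KiB .. 1 MiB) has its
 * lowest supported page-size bit at bit 12, so ffs() - 1 yields 12 and
 * max(12, 12) keeps the 4 KiB minimum, giving fmr_page_size = 4096 and
 * fmr_page_mask = ~0xfffULL.
 */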
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.rport_delete = srp_rport_delete,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);