/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dma_device, iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dma_device, iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = target->cq;
	init_attr->recv_cq = target->cq;

	target->qp = ib_create_qp(target->srp_host->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID |
						   IB_SA_PATH_REC_SGID |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &target->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = target->qp->qp_num;
	req->param.qp_type = target->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = 7;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID. The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
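	 *
	 * (For reference, a sketch of the layout as built in
	 *  srp_add_port(): the 16-byte initiator port ID is an 8-byte
	 *  identifier extension, whose last byte is the port number,
	 *  followed by the 8-byte node GUID, so only the identifier
	 *  extension is cleared here.)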
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}
	memcpy(req->priv.target_port_id, &target->id_ext, 8);
	memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	ib_send_cm_dreq(target->cm_id, NULL, 0);
	wait_for_completion(&target->done);
}

static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		scsi_host_put(target->scsi_host);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	down(&target->srp_host->target_mutex);
	list_del(&target->list);
	up(&target->srp_host->target_mutex);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
	/* And another put to really free the target port... */
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req;
	struct ib_wc wc;
	int ret;
	int i;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	list_for_each_entry(req, &target->req_queue, list) {
		req->scmnd->result = DID_RESET << 16;
		req->scmnd->scsi_done(req->scmnd);
	}

	target->rx_head = 0;
	target->tx_head = 0;
	target->tx_tail = 0;
	target->req_head = 0;
	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct srp_cmd *cmd = req->cmd->buf;
	int len;
	u8 fmt;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	if (scmnd->use_sg) {
		struct scatterlist *scat = scmnd->request_buffer;
		int n;
		int i;

		n = dma_map_sg(target->srp_host->dev->dma_device,
			       scat, scmnd->use_sg, scmnd->sc_data_direction);

		if (n == 1) {
			struct srp_direct_buf *buf = (void *) cmd->add_data;

			fmt = SRP_DATA_DESC_DIRECT;

			buf->va = cpu_to_be64(sg_dma_address(scat));
			buf->key = cpu_to_be32(target->srp_host->mr->rkey);
			buf->len = cpu_to_be32(sg_dma_len(scat));

			len = sizeof (struct srp_cmd) +
				sizeof (struct srp_direct_buf);
		} else {
			struct srp_indirect_buf *buf = (void *) cmd->add_data;
			u32 datalen = 0;

			fmt = SRP_DATA_DESC_INDIRECT;

			if (scmnd->sc_data_direction == DMA_TO_DEVICE)
				cmd->data_out_desc_cnt = n;
			else
				cmd->data_in_desc_cnt = n;

			buf->table_desc.va = cpu_to_be64(req->cmd->dma +
							 sizeof *cmd +
							 sizeof *buf);
			buf->table_desc.key =
				cpu_to_be32(target->srp_host->mr->rkey);
			buf->table_desc.len =
				cpu_to_be32(n * sizeof (struct srp_direct_buf));

			for (i = 0; i < n; ++i) {
				buf->desc_list[i].va = cpu_to_be64(sg_dma_address(&scat[i]));
				buf->desc_list[i].key =
					cpu_to_be32(target->srp_host->mr->rkey);
				buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));

				datalen += sg_dma_len(&scat[i]);
			}

			buf->len = cpu_to_be32(datalen);

			len = sizeof (struct srp_cmd) +
				sizeof (struct srp_indirect_buf) +
				n * sizeof (struct srp_direct_buf);
		}
	} else {
		struct srp_direct_buf *buf = (void *) cmd->add_data;
		dma_addr_t dma;

		dma = dma_map_single(target->srp_host->dev->dma_device,
				     scmnd->request_buffer, scmnd->request_bufflen,
				     scmnd->sc_data_direction);
		if (dma_mapping_error(dma)) {
			printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
			       scmnd->request_buffer, (int) scmnd->request_bufflen,
			       scmnd->sc_data_direction);
			return -EINVAL;
		}

		pci_unmap_addr_set(req, direct_mapping, dma);

		buf->va = cpu_to_be64(dma);
		buf->key = cpu_to_be32(target->srp_host->mr->rkey);
		buf->len = cpu_to_be32(scmnd->request_bufflen);

		fmt = SRP_DATA_DESC_DIRECT;

		len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (scmnd->use_sg)
		dma_unmap_sg(target->srp_host->dev->dma_device,
			     (struct scatterlist *) scmnd->request_buffer,
			     scmnd->use_sg, scmnd->sc_data_direction);
	else
		dma_unmap_single(target->srp_host->dev->dma_device,
				 pci_unmap_addr(req, direct_mapping),
				 scmnd->request_bufflen,
				 scmnd->sc_data_direction);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd) {
			/* No command to complete; drop this response. */
			printk(KERN_ERR PFX "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		srp_unmap_data(scmnd, target, req);

		if (!req->tsk_mgmt) {
			req->scmnd = NULL;
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			list_del(&req->list);
			req->next = target->req_head;
			target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT;
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR " [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu = target->rx_ring[next];

	list.addr = iu->dma;
	list.length = iu->size;
	list.lkey = target->srp_host->mr->lkey;

	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head. Lock cannot be dropped between call here and
 * call to __srp_post_send().
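 *
 * A rough sketch of the intended call pattern (srp_queuecommand() is
 * entered with host_lock already held by the SCSI midlayer):
 *
 *	iu = __srp_get_tx_iu(target);       (reserve a send slot, check req_lim)
 *	... build the SRP IU in iu->buf ...
 *	__srp_post_send(target, iu, len);   (advances tx_head, consumes a credit)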
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1)) {
		if (printk_ratelimit())
			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
			       target->req_lim);
		return NULL;
	}

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr = iu->dma;
	list.length = len;
	list.lkey = target->srp_host->mr->lkey;

	wr.next = NULL;
	wr.wr_id = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	long req_index;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	req_index = target->req_head;

	scmnd->scsi_done = done;
	scmnd->result = 0;
	scmnd->host_scribble = (void *) req_index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag = req_index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req = &target->req_ring[req_index];

	req->scmnd = scmnd;
	req->cmd = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	target->req_head = req->next;
	list_add_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  SRP_MAX_IU_LEN,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING " REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n",
			       opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_MRA_RECEIVED:
		printk(KERN_ERR PFX "MRA received\n");
		break;

	case IB_CM_DREP_RECEIVED:
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;
	int req_index;
	int ret = FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (scmnd->host_scribble == (void *) -1L)
		goto out;

	req_index = (long) scmnd->host_scribble;
	printk(KERN_ERR "Abort for req_index %d\n", req_index);

	req = &target->req_ring[req_index];
	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag = req_index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);
	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return FAILED;
	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		list_del(&req->list);
		req->next = target->req_head;
		target->req_head = req_index;

		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		scmnd->result = DID_ABORT << 16;
		ret = SUCCESS;
	}

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return ret;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP abort called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP reset_device called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static struct scsi_host_template srp_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.info = srp_target_info,
	.queuecommand = srp_queuecommand,
	.eh_abort_handler = srp_abort,
	.eh_device_reset_handler = srp_reset_device,
	.eh_host_reset_handler = srp_reset_host,
	.can_queue = SRP_SQ_SIZE,
	.this_id = -1,
	.sg_tablesize = SRP_MAX_INDIRECT,
	.cmd_per_lun = SRP_SQ_SIZE,
	.use_clustering = ENABLE_CLUSTERING
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dma_device))
		return -ENODEV;

	down(&host->target_mutex);
	list_add_tail(&target->list, &host->target_list);
	up(&host->target_mutex);

	target->state = SRP_TARGET_LIVE;

	/* XXX: are we supposed to have a definition of SCAN_WILD_CARD ?? */
	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, ~0, 0);

	return 0;
}

static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
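 *
 * For example (the identifiers and the "mthca0" device name below are
 * made-up placeholders; use the values your target reports):
 *
 *     echo id_ext=200100a0b8000001,ioc_guid=0002c90200001234,\
 *     dgid=fe800000000000000002c90200001234,pkey=ffff,\
 *     service_id=0002c90200001234 \
 *         > /sys/class/infiniband_srp/srp-mthca0-1/add_target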
 */
enum {
	SRP_OPT_ERR = 0,
	SRP_OPT_ID_EXT = 1 << 0,
	SRP_OPT_IOC_GUID = 1 << 1,
	SRP_OPT_DGID = 1 << 2,
	SRP_OPT_PKEY = 1 << 3,
	SRP_OPT_SERVICE_ID = 1 << 4,
	SRP_OPT_MAX_SECT = 1 << 5,
	SRP_OPT_ALL = (SRP_OPT_ID_EXT |
		       SRP_OPT_IOC_GUID |
		       SRP_OPT_DGID |
		       SRP_OPT_PKEY |
		       SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT, "id_ext=%s" },
	{ SRP_OPT_IOC_GUID, "ioc_guid=%s" },
	{ SRP_OPT_DGID, "dgid=%s" },
	{ SRP_OPT_PKEY, "pkey=%x" },
	{ SRP_OPT_SERVICE_ID, "service_id=%s" },
	{ SRP_OPT_MAX_SECT, "max_sect=%d" },
	{ SRP_OPT_ERR, NULL }
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->scsi_host = target_host;
	target->srp_host = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct ib_device *device,
				     __be64 node_guid, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	init_MUTEX(&host->target_mutex);
	init_completion(&host->released);
	host->dev = device;
	host->port = port;

	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &node_guid, 8);

	host->pd = ib_alloc_pd(device);
	if (IS_ERR(host->pd))
		goto err_free;

	host->mr = ib_get_dma_mr(host->pd,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(host->mr))
		goto err_pd;

	host->class_dev.class = &srp_class;
	host->class_dev.dev = device->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->name, port);

	if (class_device_register(&host->class_dev))
		goto err_mr;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

err_mr:
	ib_dereg_mr(host->mr);

err_pd:
	ib_dealloc_pd(host->pd);

err_free:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct srp_host *host;
	struct ib_device_attr *dev_attr;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n",
		       device->name);
		goto out;
	}

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		goto out;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(device, dev_attr->node_guid, p);
		if (host)
			list_add_tail(&host->list, dev_list);
	}

	ib_set_client_data(device, &srp_client, dev_list);

out:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;
	unsigned long flags;

	dev_list = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		down(&host->target_mutex);
		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state != SRP_TARGET_REMOVED)
				target->state = SRP_TARGET_REMOVED;
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
		}
		up(&host->target_mutex);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		ib_dereg_mr(host->mr);
		ib_dealloc_pd(host->pd);
		kfree(host);
	}

	kfree(dev_list);
}

static int __init srp_init_module(void)
{
	int ret;

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);